author:    unknown <jimw@mysql.com>  2005-12-05 10:27:46 -0800
committer: unknown <jimw@mysql.com>  2005-12-05 10:27:46 -0800
commit:    22635260195fdf4ac4dda87f6f25a220864d2ac3 (patch)
tree:      3641c8cb9ca67df1e23950e3bc7cfd233b63af0c /storage/bdb/test
parent:    baa61d747faae7e6e5a8627f359e1d7c8ced477b (diff)
download:  mariadb-git-22635260195fdf4ac4dda87f6f25a220864d2ac3.tar.gz

Import BDB 4.4.16 (tag: import-bdb-4.4.16)
BitKeeper/deleted/.del-BerkeleyDB.wsp~262cad82bbee330e: Delete: storage/bdb/build_vxworks/BerkeleyDB.wsp
BitKeeper/deleted/.del-db_deadlock.c~e66b27126703863b: Delete: storage/bdb/build_vxworks/db_deadlock/db_deadlock.c
BitKeeper/deleted/.del-README~e7bed2181d0721fc: Delete: storage/bdb/build_vxworks/dbdemo/README
BitKeeper/deleted/.del-checkapi.prl~9cff93a620c4d6ee: Delete: storage/bdb/libdb_java/checkapi.prl
BitKeeper/deleted/.del-com_sleepycat_db_Db.h~5be8fdbd82dc66ff: Delete: storage/bdb/libdb_java/com_sleepycat_db_Db.h
BitKeeper/deleted/.del-com_sleepycat_db_DbEnv.h~561e86491f216398: Delete: storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h
BitKeeper/deleted/.del-com_sleepycat_db_DbLock.h~99691cceb677fb03: Delete: storage/bdb/libdb_java/com_sleepycat_db_DbLock.h
BitKeeper/deleted/.del-com_sleepycat_db_DbLogc.h~285d0bf42cc42e7: Delete: storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h
BitKeeper/deleted/.del-com_sleepycat_db_DbLsn.h~3541030a6ddecbce: Delete: storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h
BitKeeper/deleted/.del-com_sleepycat_db_DbTxn.h~93b5284933625666: Delete: storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h
BitKeeper/deleted/.del-com_sleepycat_db_DbUtil.h~7e57707ff193506b: Delete: storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h
BitKeeper/deleted/.del-com_sleepycat_db_Dbc.h~ada851088fc835cf: Delete: storage/bdb/libdb_java/com_sleepycat_db_Dbc.h
BitKeeper/deleted/.del-com_sleepycat_db_Dbt.h~a0f565bbacdbef6f: Delete: storage/bdb/libdb_java/com_sleepycat_db_Dbt.h
BitKeeper/deleted/.del-com_sleepycat_db_xa_DbXAResource.h~422d8d531f84b16d: Delete: storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
BitKeeper/deleted/.del-java_Db.c~58bb1812dfe5854b: Delete: storage/bdb/libdb_java/java_Db.c
BitKeeper/deleted/.del-java_DbEnv.c~a2df07a3a8eb0a3: Delete: storage/bdb/libdb_java/java_DbEnv.c
BitKeeper/deleted/.del-java_DbLock.c~14752f3d17ce5091: Delete: storage/bdb/libdb_java/java_DbLock.c
BitKeeper/deleted/.del-java_DbLogc.c~32b9f5a0d8ae00e1: Delete: storage/bdb/libdb_java/java_DbLogc.c
BitKeeper/deleted/.del-java_DbLsn.c~397bce0c2e9acfae: Delete: storage/bdb/libdb_java/java_DbLsn.c
BitKeeper/deleted/.del-java_DbTxn.c~d9dc40ebae01406c: Delete: storage/bdb/libdb_java/java_DbTxn.c
BitKeeper/deleted/.del-java_DbUtil.c~f5ec02c726cdc2c8: Delete: storage/bdb/libdb_java/java_DbUtil.c
BitKeeper/deleted/.del-java_DbXAResource.c~2975bdce5f7ded66: Delete: storage/bdb/libdb_java/java_DbXAResource.c
BitKeeper/deleted/.del-java_Dbc.c~c2bfc34d203f2b41: Delete: storage/bdb/libdb_java/java_Dbc.c
BitKeeper/deleted/.del-java_Dbt.c~f24bd7e6d3dea272: Delete: storage/bdb/libdb_java/java_Dbt.c
BitKeeper/deleted/.del-java_info.c~94759fff93603d0c: Delete: storage/bdb/libdb_java/java_info.c
BitKeeper/deleted/.del-java_info.h~3ca48c25797cc075: Delete: storage/bdb/libdb_java/java_info.h
BitKeeper/deleted/.del-java_locked.c~c9804f625a17266f: Delete: storage/bdb/libdb_java/java_locked.c
BitKeeper/deleted/.del-java_locked.h~106fa45bdca6557: Delete: storage/bdb/libdb_java/java_locked.h
BitKeeper/deleted/.del-java_util.c~62c948a9959d3314: Delete: storage/bdb/libdb_java/java_util.c
BitKeeper/deleted/.del-java_util.h~a5a6e3e7658a0698: Delete: storage/bdb/libdb_java/java_util.h
BitKeeper/deleted/.del-os_vx_abs.c~c4cb2766797c82c: Delete: storage/bdb/os_vxworks/os_vx_abs.c
BitKeeper/deleted/.del-os_vx_config.c~2547fc9b4b798ec9: Delete: storage/bdb/os_vxworks/os_vx_config.c
BitKeeper/deleted/.del-os_vx_map.c~dde2102a8138f39a: Delete: storage/bdb/os_vxworks/os_vx_map.c
BitKeeper/deleted/.del-client.c~778f726f92c4b8c1: Delete: storage/bdb/rpc_client/client.c
BitKeeper/deleted/.del-gen_client_ret.c~baf03786112281b1: Delete: storage/bdb/rpc_client/gen_client_ret.c
BitKeeper/deleted/.del-clsrv.html~bedbc23a6299a5ef: Delete: storage/bdb/rpc_server/clsrv.html
BitKeeper/deleted/.del-rpc.src~5b16d10e77c12a07: Delete: storage/bdb/rpc_server/rpc.src
BitKeeper/deleted/.del-db_server_proc.c.in~8f21c610b1adde21: Delete: storage/bdb/rpc_server/c/db_server_proc.c.in
BitKeeper/deleted/.del-db_server_util.c~ba8fb3cb91ce91a2: Delete: storage/bdb/rpc_server/c/db_server_util.c
BitKeeper/deleted/.del-db_server_cxxproc.cpp~2654e937e22057b8: Delete: storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp
BitKeeper/deleted/.del-db_server_cxxutil.cpp~479fef66225389c3: Delete: storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp
BitKeeper/deleted/.del-DbDispatcher.java~fa17538a88d55d31: Delete: storage/bdb/rpc_server/java/DbDispatcher.java
BitKeeper/deleted/.del-DbServer.java~f9e1d547eb31538: Delete: storage/bdb/rpc_server/java/DbServer.java
BitKeeper/deleted/.del-FreeList.java~45b070457c62935c: Delete: storage/bdb/rpc_server/java/FreeList.java
BitKeeper/deleted/.del-LocalIterator.java~8cb6df07b817589: Delete: storage/bdb/rpc_server/java/LocalIterator.java
BitKeeper/deleted/.del-README~ba12e6afe01eccdb: Delete: storage/bdb/rpc_server/java/README
BitKeeper/deleted/.del-RpcDb.java~f3a512764051fc91: Delete: storage/bdb/rpc_server/java/RpcDb.java
BitKeeper/deleted/.del-RpcDbEnv.java~30a49486a552aebe: Delete: storage/bdb/rpc_server/java/RpcDbEnv.java
BitKeeper/deleted/.del-RpcDbTxn.java~1488264fee2347f0: Delete: storage/bdb/rpc_server/java/RpcDbTxn.java
BitKeeper/deleted/.del-RpcDbc.java~e3dee34ecd9224c: Delete: storage/bdb/rpc_server/java/RpcDbc.java
BitKeeper/deleted/.del-Timer.java~eac869c015ad1fc2: Delete: storage/bdb/rpc_server/java/Timer.java
BitKeeper/deleted/.del-jrpcgen.jar~96b782a68a231bd: Delete: storage/bdb/rpc_server/java/jrpcgen.jar
BitKeeper/deleted/.del-oncrpc.jar~f0588659d0f723c: Delete: storage/bdb/rpc_server/java/oncrpc.jar
BitKeeper/deleted/.del-DbServerStub.java~412a16b22ae914c5: Delete: storage/bdb/rpc_server/java/gen/DbServerStub.java
BitKeeper/deleted/.del-s_jrpcgen~eea48620d17035ff: Delete: storage/bdb/rpc_server/java/s_jrpcgen
BitKeeper/deleted/.del-__db_associate_msg.java~ead380cad8a83405: Delete: storage/bdb/rpc_server/java/gen/__db_associate_msg.java
BitKeeper/deleted/.del-__db_associate_reply.java~50abd1ffed116bfc: Delete: storage/bdb/rpc_server/java/gen/__db_associate_reply.java
BitKeeper/deleted/.del-__db_bt_maxkey_msg.java~9870e67c8420d97e: Delete: storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
BitKeeper/deleted/.del-__db_bt_maxkey_reply.java~25c4caf1c0b1917a: Delete: storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
BitKeeper/deleted/.del-__db_bt_minkey_msg.java~7eb5c14fb24b9d6a: Delete: storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java
BitKeeper/deleted/.del-__db_bt_minkey_reply.java~bca9c5ea81c46760: Delete: storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java
BitKeeper/deleted/.del-__db_close_msg.java~f04ee69a1ba56835: Delete: storage/bdb/rpc_server/java/gen/__db_close_msg.java
BitKeeper/deleted/.del-__db_close_reply.java~b28cc0d86864f4e4: Delete: storage/bdb/rpc_server/java/gen/__db_close_reply.java
BitKeeper/deleted/.del-__db_create_msg.java~1aaa9e535b899a04: Delete: storage/bdb/rpc_server/java/gen/__db_create_msg.java
BitKeeper/deleted/.del-__db_create_reply.java~4d1ec958b6e49be8: Delete: storage/bdb/rpc_server/java/gen/__db_create_reply.java
BitKeeper/deleted/.del-__db_cursor_msg.java~cf76a6ca7b0a55c1: Delete: storage/bdb/rpc_server/java/gen/__db_cursor_msg.java
BitKeeper/deleted/.del-__db_cursor_reply.java~f001474e68713f66: Delete: storage/bdb/rpc_server/java/gen/__db_cursor_reply.java
BitKeeper/deleted/.del-__db_del_msg.java~941d3f21685abda5: Delete: storage/bdb/rpc_server/java/gen/__db_del_msg.java
BitKeeper/deleted/.del-__db_del_reply.java~86d1a74e7667c534: Delete: storage/bdb/rpc_server/java/gen/__db_del_reply.java
BitKeeper/deleted/.del-__db_encrypt_msg.java~e471e1131c7c9991: Delete: storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java
BitKeeper/deleted/.del-__db_encrypt_reply.java~77eaf465989f374f: Delete: storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java
BitKeeper/deleted/.del-__db_extentsize_msg.java~98b41250d80d7f25: Delete: storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java
BitKeeper/deleted/.del-__db_extentsize_reply.java~5a19d472b4c78471: Delete: storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java
BitKeeper/deleted/.del-__db_flags_msg.java~441eed476fac3b75: Delete: storage/bdb/rpc_server/java/gen/__db_flags_msg.java
BitKeeper/deleted/.del-__db_flags_reply.java~fbe276e35c025e85: Delete: storage/bdb/rpc_server/java/gen/__db_flags_reply.java
BitKeeper/deleted/.del-__db_get_msg.java~5862039845a2e82e: Delete: storage/bdb/rpc_server/java/gen/__db_get_msg.java
BitKeeper/deleted/.del-__db_get_reply.java~13a5b37f6e1c75a6: Delete: storage/bdb/rpc_server/java/gen/__db_get_reply.java
BitKeeper/deleted/.del-__db_h_ffactor_msg.java~3ae99a79bbc32ace: Delete: storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java
BitKeeper/deleted/.del-__db_h_ffactor_reply.java~49fa9e30aa29cf15: Delete: storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java
BitKeeper/deleted/.del-__db_h_nelem_msg.java~a74540e24c2376ac: Delete: storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java
BitKeeper/deleted/.del-__db_h_nelem_reply.java~fdefe84280309a69: Delete: storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java
BitKeeper/deleted/.del-__db_join_msg.java~bb9bae2ff960548: Delete: storage/bdb/rpc_server/java/gen/__db_join_msg.java
BitKeeper/deleted/.del-__db_join_reply.java~938be1f7a7fa1d8d: Delete: storage/bdb/rpc_server/java/gen/__db_join_reply.java
BitKeeper/deleted/.del-__db_key_range_msg.java~c8d5cc3c2dc970d4: Delete: storage/bdb/rpc_server/java/gen/__db_key_range_msg.java
BitKeeper/deleted/.del-__db_key_range_reply.java~92b341e0314f22d9: Delete: storage/bdb/rpc_server/java/gen/__db_key_range_reply.java
BitKeeper/deleted/.del-__db_lorder_msg.java~db0ee150bd760a09: Delete: storage/bdb/rpc_server/java/gen/__db_lorder_msg.java
BitKeeper/deleted/.del-__db_lorder_reply.java~e22ba9a03ff14a74: Delete: storage/bdb/rpc_server/java/gen/__db_lorder_reply.java
BitKeeper/deleted/.del-__db_open_msg.java~dc63c84feda071ee: Delete: storage/bdb/rpc_server/java/gen/__db_open_msg.java
BitKeeper/deleted/.del-__db_open_reply.java~cc17c92d41c40: Delete: storage/bdb/rpc_server/java/gen/__db_open_reply.java
BitKeeper/deleted/.del-__db_pagesize_msg.java~448d1d04d3c7220f: Delete: storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java
BitKeeper/deleted/.del-__db_pagesize_reply.java~cddff625518b077c: Delete: storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java
BitKeeper/deleted/.del-__db_pget_msg.java~d47b18162af92a04: Delete: storage/bdb/rpc_server/java/gen/__db_pget_msg.java
BitKeeper/deleted/.del-__db_pget_reply.java~51e8ec0bb4a42ddb: Delete: storage/bdb/rpc_server/java/gen/__db_pget_reply.java
BitKeeper/deleted/.del-__db_put_msg.java~11d62e0be1df9770: Delete: storage/bdb/rpc_server/java/gen/__db_put_msg.java
BitKeeper/deleted/.del-__db_put_reply.java~237edfe6e16b98dc: Delete: storage/bdb/rpc_server/java/gen/__db_put_reply.java
BitKeeper/deleted/.del-__db_re_delim_msg.java~6f1d8808ae1268f7: Delete: storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java
BitKeeper/deleted/.del-__db_re_delim_reply.java~9b3748da6051f58f: Delete: storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java
BitKeeper/deleted/.del-__db_re_len_msg.java~b81150167375308c: Delete: storage/bdb/rpc_server/java/gen/__db_re_len_msg.java
BitKeeper/deleted/.del-__db_re_len_reply.java~cd4a1c758256deed: Delete: storage/bdb/rpc_server/java/gen/__db_re_len_reply.java
BitKeeper/deleted/.del-__db_re_pad_msg.java~bf9899217833aab7: Delete: storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java
BitKeeper/deleted/.del-__db_re_pad_reply.java~1617988ae3b4ed7: Delete: storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java
BitKeeper/deleted/.del-__db_remove_msg.java~6500258dda02905: Delete: storage/bdb/rpc_server/java/gen/__db_remove_msg.java
BitKeeper/deleted/.del-__db_remove_reply.java~2f120860c28c2147: Delete: storage/bdb/rpc_server/java/gen/__db_remove_reply.java
BitKeeper/deleted/.del-__db_rename_msg.java~e257f5621523c010: Delete: storage/bdb/rpc_server/java/gen/__db_rename_msg.java
BitKeeper/deleted/.del-__db_rename_reply.java~3d9b0d71ddef1e4f: Delete: storage/bdb/rpc_server/java/gen/__db_rename_reply.java
BitKeeper/deleted/.del-__db_stat_msg.java~18777f8abf4c6db7: Delete: storage/bdb/rpc_server/java/gen/__db_stat_msg.java
BitKeeper/deleted/.del-__db_stat_reply.java~c65aa70ee0b3fa4b: Delete: storage/bdb/rpc_server/java/gen/__db_stat_reply.java
BitKeeper/deleted/.del-__db_sync_msg.java~2d528efa77b52f8e: Delete: storage/bdb/rpc_server/java/gen/__db_sync_msg.java
BitKeeper/deleted/.del-__db_sync_reply.java~4618b14ca6182d1c: Delete: storage/bdb/rpc_server/java/gen/__db_sync_reply.java
BitKeeper/deleted/.del-__db_truncate_msg.java~7f0b791efc19d01: Delete: storage/bdb/rpc_server/java/gen/__db_truncate_msg.java
BitKeeper/deleted/.del-__db_truncate_reply.java~6b546b15b67c4b10: Delete: storage/bdb/rpc_server/java/gen/__db_truncate_reply.java
BitKeeper/deleted/.del-__dbc_close_msg.java~dec9c28a443eded7: Delete: storage/bdb/rpc_server/java/gen/__dbc_close_msg.java
BitKeeper/deleted/.del-__dbc_close_reply.java~936e3d08683e1807: Delete: storage/bdb/rpc_server/java/gen/__dbc_close_reply.java
BitKeeper/deleted/.del-__dbc_count_msg.java~596f6137c3cbb9f: Delete: storage/bdb/rpc_server/java/gen/__dbc_count_msg.java
BitKeeper/deleted/.del-__dbc_count_reply.java~2f9ae9c77e69db07: Delete: storage/bdb/rpc_server/java/gen/__dbc_count_reply.java
BitKeeper/deleted/.del-__dbc_del_msg.java~e3ee262c41809935: Delete: storage/bdb/rpc_server/java/gen/__dbc_del_msg.java
BitKeeper/deleted/.del-__dbc_del_reply.java~b30ad669b593c26e: Delete: storage/bdb/rpc_server/java/gen/__dbc_del_reply.java
BitKeeper/deleted/.del-__dbc_dup_msg.java~a507a9f28cb94fd9: Delete: storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java
BitKeeper/deleted/.del-__dbc_dup_reply.java~e14ee0a887f36027: Delete: storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java
BitKeeper/deleted/.del-__dbc_get_msg.java~38d851bc7f22434f: Delete: storage/bdb/rpc_server/java/gen/__dbc_get_msg.java
BitKeeper/deleted/.del-__dbc_get_reply.java~a2c3a3c61d891a43: Delete: storage/bdb/rpc_server/java/gen/__dbc_get_reply.java
BitKeeper/deleted/.del-__dbc_pget_msg.java~68be7ca3574c35e8: Delete: storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java
BitKeeper/deleted/.del-__dbc_pget_reply.java~d7a142e0d03d215: Delete: storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java
BitKeeper/deleted/.del-__dbc_put_msg.java~8ffa5f73f1dc24a8: Delete: storage/bdb/rpc_server/java/gen/__dbc_put_msg.java
BitKeeper/deleted/.del-__dbc_put_reply.java~fa591c2170712742: Delete: storage/bdb/rpc_server/java/gen/__dbc_put_reply.java
BitKeeper/deleted/.del-__env_cachesize_msg.java~e1617ac675f39ff9: Delete: storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java
BitKeeper/deleted/.del-__env_cachesize_reply.java~46d61203dc67225a: Delete: storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java
BitKeeper/deleted/.del-__env_close_msg.java~2b7cf5642c11b060: Delete: storage/bdb/rpc_server/java/gen/__env_close_msg.java
BitKeeper/deleted/.del-__env_close_reply.java~e7e5a0bd1ac10bb5: Delete: storage/bdb/rpc_server/java/gen/__env_close_reply.java
BitKeeper/deleted/.del-__env_create_msg.java~a232d9b338470735: Delete: storage/bdb/rpc_server/java/gen/__env_create_msg.java
BitKeeper/deleted/.del-__env_create_reply.java~349471db68b992c: Delete: storage/bdb/rpc_server/java/gen/__env_create_reply.java
BitKeeper/deleted/.del-__env_dbremove_msg.java~e4fcb5d31ea42840: Delete: storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java
BitKeeper/deleted/.del-__env_dbremove_reply.java~6ad639e29ee2c24d: Delete: storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java
BitKeeper/deleted/.del-__env_dbrename_msg.java~e4082b7dda94a15e: Delete: storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java
BitKeeper/deleted/.del-__env_dbrename_reply.java~a514af5890b9c792: Delete: storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java
BitKeeper/deleted/.del-__env_encrypt_msg.java~68541e7234a74fff: Delete: storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java
BitKeeper/deleted/.del-__env_encrypt_reply.java~3acfdefa98f6e4ad: Delete: storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java
BitKeeper/deleted/.del-__env_flags_msg.java~5868dc483eaea72d: Delete: storage/bdb/rpc_server/java/gen/__env_flags_msg.java
BitKeeper/deleted/.del-__env_flags_reply.java~665b55b6cf470004: Delete: storage/bdb/rpc_server/java/gen/__env_flags_reply.java
BitKeeper/deleted/.del-__env_open_msg.java~aabfb33c7b09cf55: Delete: storage/bdb/rpc_server/java/gen/__env_open_msg.java
BitKeeper/deleted/.del-__env_open_reply.java~7cdc7e4f38510648: Delete: storage/bdb/rpc_server/java/gen/__env_open_reply.java
BitKeeper/deleted/.del-__env_remove_msg.java~6a21875bb076d162: Delete: storage/bdb/rpc_server/java/gen/__env_remove_msg.java
BitKeeper/deleted/.del-__env_remove_reply.java~76663da926423372: Delete: storage/bdb/rpc_server/java/gen/__env_remove_reply.java
BitKeeper/deleted/.del-__txn_abort_msg.java~f8e890f75d0f694b: Delete: storage/bdb/rpc_server/java/gen/__txn_abort_msg.java
BitKeeper/deleted/.del-__txn_abort_reply.java~678ad0b6363c2758: Delete: storage/bdb/rpc_server/java/gen/__txn_abort_reply.java
BitKeeper/deleted/.del-__txn_begin_msg.java~b783c040ab7afcb1: Delete: storage/bdb/rpc_server/java/gen/__txn_begin_msg.java
BitKeeper/deleted/.del-__txn_begin_reply.java~92bff8b98e06b023: Delete: storage/bdb/rpc_server/java/gen/__txn_begin_reply.java
BitKeeper/deleted/.del-__txn_commit_msg.java~5a40acc93f71e243: Delete: storage/bdb/rpc_server/java/gen/__txn_commit_msg.java
BitKeeper/deleted/.del-__txn_commit_reply.java~f532e87a8f8ba8a8: Delete: storage/bdb/rpc_server/java/gen/__txn_commit_reply.java
BitKeeper/deleted/.del-__txn_discard_msg.java~cf3b52abd646bbe: Delete: storage/bdb/rpc_server/java/gen/__txn_discard_msg.java
BitKeeper/deleted/.del-__txn_discard_reply.java~695b92637d8c6107: Delete: storage/bdb/rpc_server/java/gen/__txn_discard_reply.java
BitKeeper/deleted/.del-__txn_prepare_msg.java~9547b20ee838b37f: Delete: storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java
BitKeeper/deleted/.del-__txn_prepare_reply.java~2c9e2f8435b960cd: Delete: storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java
BitKeeper/deleted/.del-__txn_recover_msg.java~e8b91e7a491bf424: Delete: storage/bdb/rpc_server/java/gen/__txn_recover_msg.java
BitKeeper/deleted/.del-__txn_recover_reply.java~4548cbca6b058d06: Delete: storage/bdb/rpc_server/java/gen/__txn_recover_reply.java
BitKeeper/deleted/.del-db_server.java~7da469d45426b310: Delete: storage/bdb/rpc_server/java/gen/db_server.java
BitKeeper/deleted/.del-Btree.pm~d7ddd8ca99da1884: Delete: storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
BitKeeper/deleted/.del-Hash.pm~6c3032fb51011864: Delete: storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
BitKeeper/deleted/.del-BerkeleyDB.pm~b36f7b4b21d6a326: Delete: storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm
BitKeeper/deleted/.del-BerkeleyDB.pod~cf47ec2bb9d97c80: Delete: storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod
BitKeeper/deleted/.del-BerkeleyDB.pod.P~be77a6f219f7803f: Delete: storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
BitKeeper/deleted/.del-BerkeleyDB.xs~46038b9ecc2b0994: Delete: storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs
BitKeeper/deleted/.del-Changes~51eceef1511d2b8e: Delete: storage/bdb/perl/BerkeleyDB/Changes
BitKeeper/deleted/.del-MANIFEST~82a35c39f9f9dc92: Delete: storage/bdb/perl/BerkeleyDB/MANIFEST
BitKeeper/deleted/.del-META.yml~5d2eb91de2ace77: Delete: storage/bdb/perl/BerkeleyDB/META.yml
BitKeeper/deleted/.del-Makefile.PL~6c30b40064dc93bc: Delete: storage/bdb/perl/BerkeleyDB/Makefile.PL
BitKeeper/deleted/.del-README~2c9091895ce880ad: Delete: storage/bdb/perl/BerkeleyDB/README
BitKeeper/deleted/.del-Todo~31c9e469be7d7fce: Delete: storage/bdb/perl/BerkeleyDB/Todo
BitKeeper/deleted/.del-config.in~5ee173677818dc79: Delete: storage/bdb/perl/BerkeleyDB/config.in
BitKeeper/deleted/.del-constants.h~ef5ed100dd4398b9: Delete: storage/bdb/perl/BerkeleyDB/constants.h
BitKeeper/deleted/.del-constants.xs~9f261dece7663f4: Delete: storage/bdb/perl/BerkeleyDB/constants.xs
BitKeeper/deleted/.del-dbinfo~143868bf1fee3891: Delete: storage/bdb/perl/BerkeleyDB/dbinfo
BitKeeper/deleted/.del-mkconsts~d6db2129cd65a044: Delete: storage/bdb/perl/BerkeleyDB/mkconsts
BitKeeper/deleted/.del-mkpod~86cf6dafc3259b14: Delete: storage/bdb/perl/BerkeleyDB/mkpod
BitKeeper/deleted/.del-ppport.h~e8e959489bac6b4e: Delete: storage/bdb/perl/BerkeleyDB/ppport.h
BitKeeper/deleted/.del-scan~d41c8bfdde29e0ee: Delete: storage/bdb/perl/BerkeleyDB/scan
BitKeeper/deleted/.del-dec_osf.pl~bb1dd3f7343acea1: Delete: storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl
BitKeeper/deleted/.del-typemap~bc62fd16304b335c: Delete: storage/bdb/perl/BerkeleyDB/typemap
BitKeeper/deleted/.del-irix_6_5.pl~6cf1332ac7c6d177: Delete: storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
BitKeeper/deleted/.del-5.004~80083b2782fae3b1: Delete: storage/bdb/perl/BerkeleyDB/patches/5.004
BitKeeper/deleted/.del-solaris.pl~b7440db23153ada1: Delete: storage/bdb/perl/BerkeleyDB/hints/solaris.pl
BitKeeper/deleted/.del-5.004_01~5e653651e8e3819: Delete: storage/bdb/perl/BerkeleyDB/patches/5.004_01
BitKeeper/deleted/.del-5.004_02~1a8749849460471d: Delete: storage/bdb/perl/BerkeleyDB/patches/5.004_02
BitKeeper/deleted/.del-5.004_03~e3fefc28c49ad553: Delete: storage/bdb/perl/BerkeleyDB/patches/5.004_03
BitKeeper/deleted/.del-5.004_04~2acad6626738297f: Delete: storage/bdb/perl/BerkeleyDB/patches/5.004_04
BitKeeper/deleted/.del-5.004_05~740d60296a10e2b6: Delete: storage/bdb/perl/BerkeleyDB/patches/5.004_05
BitKeeper/deleted/.del-5.005~4df32971d111b075: Delete: storage/bdb/perl/BerkeleyDB/patches/5.005
BitKeeper/deleted/.del-5.005_01~fc87edb6772b5b6c: Delete: storage/bdb/perl/BerkeleyDB/patches/5.005_01
BitKeeper/deleted/.del-5.005_02~31d0796db6079b6a: Delete: storage/bdb/perl/BerkeleyDB/patches/5.005_02
BitKeeper/deleted/.del-5.005_03~a9e6b8287f9f0348: Delete: storage/bdb/perl/BerkeleyDB/patches/5.005_03
BitKeeper/deleted/.del-5.6.0~48068342df311cef: Delete: storage/bdb/perl/BerkeleyDB/patches/5.6.0
BitKeeper/deleted/.del-btree.t~e009d5c64dabc628: Delete: storage/bdb/perl/BerkeleyDB/t/btree.t
BitKeeper/deleted/.del-cds.t~6393f9b74adda854: Delete: storage/bdb/perl/BerkeleyDB/t/cds.t
BitKeeper/deleted/.del-destroy.t~aed694cd9448ef26: Delete: storage/bdb/perl/BerkeleyDB/t/destroy.t
BitKeeper/deleted/.del-encrypt.t~91e1152aa288384d: Delete: storage/bdb/perl/BerkeleyDB/t/encrypt.t
BitKeeper/deleted/.del-env.t~c1aa03091db9670: Delete: storage/bdb/perl/BerkeleyDB/t/env.t
BitKeeper/deleted/.del-examples.t~b1e3ca3da78b9898: Delete: storage/bdb/perl/BerkeleyDB/t/examples.t
BitKeeper/deleted/.del-examples.t.T~c0f55d47f279a7e2: Delete: storage/bdb/perl/BerkeleyDB/t/examples.t.T
BitKeeper/deleted/.del-examples3.t.T~452caaaefd5c648b: Delete: storage/bdb/perl/BerkeleyDB/t/examples3.t.T
BitKeeper/deleted/.del-examples3.t~f712dfeec05867fb: Delete: storage/bdb/perl/BerkeleyDB/t/examples3.t
BitKeeper/deleted/.del-filter.t~48b7f05a43464f11: Delete: storage/bdb/perl/BerkeleyDB/t/filter.t
BitKeeper/deleted/.del-hash.t~92eb3848961b229b: Delete: storage/bdb/perl/BerkeleyDB/t/hash.t
BitKeeper/deleted/.del-join.t~256f4a5c41c79938: Delete: storage/bdb/perl/BerkeleyDB/t/join.t
BitKeeper/deleted/.del-mldbm.t~8189f0dc9b4ef4: Delete: storage/bdb/perl/BerkeleyDB/t/mldbm.t
BitKeeper/deleted/.del-pod.t~544de536d7c16d1f: Delete: storage/bdb/perl/BerkeleyDB/t/pod.t
BitKeeper/deleted/.del-queue.t~c9c560769cc1cb7b: Delete: storage/bdb/perl/BerkeleyDB/t/queue.t
BitKeeper/deleted/.del-recno.t~28b76050831a3cae: Delete: storage/bdb/perl/BerkeleyDB/t/recno.t
BitKeeper/deleted/.del-strict.t~20b543351fd50fc0: Delete: storage/bdb/perl/BerkeleyDB/t/strict.t
BitKeeper/deleted/.del-subdb.t~56324aaeb936c7f1: Delete: storage/bdb/perl/BerkeleyDB/t/subdb.t
BitKeeper/deleted/.del-txn.t~546ac62364616115: Delete: storage/bdb/perl/BerkeleyDB/t/txn.t
BitKeeper/deleted/.del-unknown.t~2f5dc0ece63926c4: Delete: storage/bdb/perl/BerkeleyDB/t/unknown.t
BitKeeper/deleted/.del-util.pm~27e29eb4fddd71a5: Delete: storage/bdb/perl/BerkeleyDB/t/util.pm
BitKeeper/deleted/.del-Changes~f0e3f260c5b81da4: Delete: storage/bdb/perl/DB_File/Changes
BitKeeper/deleted/.del-DB_File.pm~108149cc66802405: Delete: storage/bdb/perl/DB_File/DB_File.pm
BitKeeper/deleted/.del-DB_File.xs~8c61b34efd458ffc: Delete: storage/bdb/perl/DB_File/DB_File.xs
BitKeeper/deleted/.del-DB_File_BS~74e05e841c69424: Delete: storage/bdb/perl/DB_File/DB_File_BS
BitKeeper/deleted/.del-MANIFEST~5048670bca9a416: Delete: storage/bdb/perl/DB_File/MANIFEST
BitKeeper/deleted/.del-META.yml~99d0c37ae405a9bd: Delete: storage/bdb/perl/DB_File/META.yml
BitKeeper/deleted/.del-Makefile.PL~18275ea2225dbf0c: Delete: storage/bdb/perl/DB_File/Makefile.PL
BitKeeper/deleted/.del-README~9ca1ac49d099e79a: Delete: storage/bdb/perl/DB_File/README
BitKeeper/deleted/.del-config.in~fac3c512af0f372a: Delete: storage/bdb/perl/DB_File/config.in
BitKeeper/deleted/.del-dbinfo~9de945c9960a5600: Delete: storage/bdb/perl/DB_File/dbinfo
BitKeeper/deleted/.del-fallback.h~9975a2248666cd3e: Delete: storage/bdb/perl/DB_File/fallback.h
BitKeeper/deleted/.del-fallback.xs~20972a102becd17c: Delete: storage/bdb/perl/DB_File/fallback.xs
BitKeeper/deleted/.del-ppport.h~b24a0fced14b654: Delete: storage/bdb/perl/DB_File/ppport.h
BitKeeper/deleted/.del-typemap~5581c211c9caa676: Delete: storage/bdb/perl/DB_File/typemap
BitKeeper/deleted/.del-dynixptx.pl~fe52539bc8f324ba: Delete: storage/bdb/perl/DB_File/hints/dynixptx.pl
BitKeeper/deleted/.del-version.c~5d8c4ea020639989: Delete: storage/bdb/perl/DB_File/version.c
BitKeeper/deleted/.del-sco.pl~3d5472171a399180: Delete: storage/bdb/perl/DB_File/hints/sco.pl
BitKeeper/deleted/.del-5.004_01~ff446a68fbd4f0b8: Delete: storage/bdb/perl/DB_File/patches/5.004_01
BitKeeper/deleted/.del-5.004~93f4db16a08fd2bc: Delete: storage/bdb/perl/DB_File/patches/5.004
BitKeeper/deleted/.del-5.004_02~82bb39971394b02a: Delete: storage/bdb/perl/DB_File/patches/5.004_02
BitKeeper/deleted/.del-5.004_03~b7c43469f9885933: Delete: storage/bdb/perl/DB_File/patches/5.004_03
BitKeeper/deleted/.del-5.004_04~24f77fc8ce4c05a4: Delete: storage/bdb/perl/DB_File/patches/5.004_04
BitKeeper/deleted/.del-5.004_05~edb5ae874e40ac5: Delete: storage/bdb/perl/DB_File/patches/5.004_05
BitKeeper/deleted/.del-5.005~71d4c109691053f2: Delete: storage/bdb/perl/DB_File/patches/5.005
BitKeeper/deleted/.del-5.005_01~eb5fede99787c288: Delete: storage/bdb/perl/DB_File/patches/5.005_01
BitKeeper/deleted/.del-5.005_02~4cd3657759345990: Delete: storage/bdb/perl/DB_File/patches/5.005_02
BitKeeper/deleted/.del-5.005_03~18145a5d8ddd06f: Delete: storage/bdb/perl/DB_File/patches/5.005_03
BitKeeper/deleted/.del-5.6.0~3697abb57c4fd3b9: Delete: storage/bdb/perl/DB_File/patches/5.6.0
BitKeeper/deleted/.del-db-btree.t~154014b7530d8be3: Delete: storage/bdb/perl/DB_File/t/db-btree.t
BitKeeper/deleted/.del-db-hash.t~19bb665f32275d9a: Delete: storage/bdb/perl/DB_File/t/db-hash.t
BitKeeper/deleted/.del-db-recno.t~8a56bdfc2f73371: Delete: storage/bdb/perl/DB_File/t/db-recno.t
BitKeeper/deleted/.del-tcl_compat.c~5a8010d160060a98: Delete: storage/bdb/tcl/tcl_compat.c
BitKeeper/deleted/.del-tcl_db.c~8fdaaad2aeeda35e: Delete: storage/bdb/tcl/tcl_db.c
BitKeeper/deleted/.del-tcl_db_pkg.c~2f0f287f3831577c: Delete: storage/bdb/tcl/tcl_db_pkg.c
BitKeeper/deleted/.del-tcl_dbcursor.c~625ad62ba96af2ed: Delete: storage/bdb/tcl/tcl_dbcursor.c
BitKeeper/deleted/.del-tcl_env.c~4f598ca35d3c4f8b: Delete: storage/bdb/tcl/tcl_env.c
BitKeeper/deleted/.del-tcl_internal.c~f6ca168017fd06e1: Delete: storage/bdb/tcl/tcl_internal.c
BitKeeper/deleted/.del-tcl_lock.c~5a2c6bd823eef42c: Delete: storage/bdb/tcl/tcl_lock.c
BitKeeper/deleted/.del-tcl_log.c~a8a8d90da56a5e07: Delete: storage/bdb/tcl/tcl_log.c
BitKeeper/deleted/.del-tcl_mp.c~22565406c6a6462b: Delete: storage/bdb/tcl/tcl_mp.c
BitKeeper/deleted/.del-tcl_rep.c~e6566d8441250794: Delete: storage/bdb/tcl/tcl_rep.c
BitKeeper/deleted/.del-tcl_seq.c~c38c7f7d22abc6de: Delete: storage/bdb/tcl/tcl_seq.c
BitKeeper/deleted/.del-tcl_txn.c~e8729bea575a01bb: Delete: storage/bdb/tcl/tcl_txn.c
BitKeeper/deleted/.del-tcl_util.c~32bd9f706e204afd: Delete: storage/bdb/tcl/tcl_util.c
BitKeeper/deleted/.del-db.html~2cea96773f8bc0a9: Delete: storage/bdb/tcl/docs/db.html
BitKeeper/deleted/.del-env.html~882c3ffaf3ea10a6: Delete: storage/bdb/tcl/docs/env.html
BitKeeper/deleted/.del-historic.html~27efa50d7af02fa0: Delete: storage/bdb/tcl/docs/historic.html
BitKeeper/deleted/.del-index.html~27e6b3e9ca7eb3bc: Delete: storage/bdb/tcl/docs/index.html
BitKeeper/deleted/.del-library.html~d60be859a6d3fdbb: Delete: storage/bdb/tcl/docs/library.html
BitKeeper/deleted/.del-lock.html~121ccf43b1822008: Delete: storage/bdb/tcl/docs/lock.html
BitKeeper/deleted/.del-log.html~d51c4a05bac8893f: Delete: storage/bdb/tcl/docs/log.html
BitKeeper/deleted/.del-mpool.html~cad6eba280ff4db1: Delete: storage/bdb/tcl/docs/mpool.html
BitKeeper/deleted/.del-rep.html~56203335b51b864a: Delete: storage/bdb/tcl/docs/rep.html
BitKeeper/deleted/.del-test.html~987c2f02707fbaa6: Delete: storage/bdb/tcl/docs/test.html
BitKeeper/deleted/.del-txn.html~696a0a46ef941059: Delete: storage/bdb/tcl/docs/txn.html
BitKeeper/deleted/.del-archive.tcl~8e8c452487960455: Delete: storage/bdb/test/archive.tcl
BitKeeper/deleted/.del-bigfile001.tcl~899a31b17c2dc7e2: Delete: storage/bdb/test/bigfile001.tcl
BitKeeper/deleted/.del-bigfile002.tcl~3ff5b1b29cc2d9f0: Delete: storage/bdb/test/bigfile002.tcl
BitKeeper/deleted/.del-byteorder.tcl~fa49cd99b5ef4a27: Delete: storage/bdb/test/byteorder.tcl
BitKeeper/deleted/.del-conscript.tcl~52f54459d6ba9e1f: Delete: storage/bdb/test/conscript.tcl
BitKeeper/deleted/.del-dbm.tcl~fc57479c1349169c: Delete: storage/bdb/test/dbm.tcl
BitKeeper/deleted/.del-dbscript.tcl~808ff0b03a79a48a: Delete: storage/bdb/test/dbscript.tcl
BitKeeper/deleted/.del-ddoyscript.tcl~a84d00bcc3e22889: Delete: storage/bdb/test/ddoyscript.tcl
BitKeeper/deleted/.del-ddscript.tcl~2fa930b65d65212b: Delete: storage/bdb/test/ddscript.tcl
BitKeeper/deleted/.del-dead001.tcl~b8ee6deab2cf41: Delete: storage/bdb/test/dead001.tcl
BitKeeper/deleted/.del-dead002.tcl~4ac413316f37d12: Delete: storage/bdb/test/dead002.tcl
BitKeeper/deleted/.del-dead003.tcl~ef79ddad4ad28e2: Delete: storage/bdb/test/dead003.tcl
BitKeeper/deleted/.del-dead004.tcl~45fb32cae919f872: Delete: storage/bdb/test/dead004.tcl
BitKeeper/deleted/.del-dead005.tcl~cb0208bc8e023367: Delete: storage/bdb/test/dead005.tcl
BitKeeper/deleted/.del-dead006.tcl~70e8875ef45eede8: Delete: storage/bdb/test/dead006.tcl
BitKeeper/deleted/.del-dead007.tcl~55aae8b9c7d421fa: Delete: storage/bdb/test/dead007.tcl
BitKeeper/deleted/.del-env001.tcl~99979288b7da80c8: Delete: storage/bdb/test/env001.tcl
BitKeeper/deleted/.del-env002.tcl~5debc4ae93c72d68: Delete: storage/bdb/test/env002.tcl
BitKeeper/deleted/.del-env003.tcl~121fcf8dc7778d34: Delete: storage/bdb/test/env003.tcl
BitKeeper/deleted/.del-env004.tcl~e7807309994bded4: Delete: storage/bdb/test/env004.tcl
BitKeeper/deleted/.del-env005.tcl~e4f000e9c61b43d1: Delete: storage/bdb/test/env005.tcl
BitKeeper/deleted/.del-env006.tcl~92493fc7761a9865: Delete: storage/bdb/test/env006.tcl
BitKeeper/deleted/.del-env007.tcl~2e6fab912f6358c0: Delete: storage/bdb/test/env007.tcl
BitKeeper/deleted/.del-env008.tcl~fd2df2fc17f08dc7: Delete: storage/bdb/test/env008.tcl
BitKeeper/deleted/.del-env009.tcl~98a210e96f4b00e0: Delete: storage/bdb/test/env009.tcl
BitKeeper/deleted/.del-env010.tcl~41352761470bb901: Delete: storage/bdb/test/env010.tcl
BitKeeper/deleted/.del-env011.tcl~76ceb885491bc871: Delete: storage/bdb/test/env011.tcl
BitKeeper/deleted/.del-hsearch.tcl~34a91568c7541aa: Delete: storage/bdb/test/hsearch.tcl
BitKeeper/deleted/.del-join.tcl~b503918cf9e496d1: Delete: storage/bdb/test/join.tcl
BitKeeper/deleted/.del-lock001.tcl~5e6ac15c660212c3: Delete: storage/bdb/test/lock001.tcl
BitKeeper/deleted/.del-lock002.tcl~ba8f9432fb2ac00d: Delete: storage/bdb/test/lock002.tcl
BitKeeper/deleted/.del-lock003.tcl~759cc2bf13380ab5: Delete: storage/bdb/test/lock003.tcl
BitKeeper/deleted/.del-lock004.tcl~9e51c2eba91943a0: Delete: storage/bdb/test/lock004.tcl
BitKeeper/deleted/.del-lock005.tcl~a7a6d094b22f85bb: Delete: storage/bdb/test/lock005.tcl
BitKeeper/deleted/.del-lockscript.tcl~a5adf8083780b5ab: Delete: storage/bdb/test/lockscript.tcl
BitKeeper/deleted/.del-log001.tcl~f7d62b195fc6d308: Delete: storage/bdb/test/log001.tcl
BitKeeper/deleted/.del-log002.tcl~c5acccda93ad2079: Delete: storage/bdb/test/log002.tcl
BitKeeper/deleted/.del-log003.tcl~5bf8d64f40a171c: Delete: storage/bdb/test/log003.tcl
BitKeeper/deleted/.del-log004.tcl~9bf0857249b0c080: Delete: storage/bdb/test/log004.tcl
BitKeeper/deleted/.del-log005.tcl~12b7ef7064a17ef3: Delete: storage/bdb/test/log005.tcl
BitKeeper/deleted/.del-logtrack.tcl~9197d7cf5d0bec52: Delete: storage/bdb/test/logtrack.tcl
BitKeeper/deleted/.del-mdbscript.tcl~fba3f049f17b2af6: Delete: storage/bdb/test/mdbscript.tcl
BitKeeper/deleted/.del-memp001.tcl~ab8f63cc9fcfb794: Delete: storage/bdb/test/memp001.tcl
BitKeeper/deleted/.del-memp002.tcl~e5d78e0b4e90ff48: Delete: storage/bdb/test/memp002.tcl
BitKeeper/deleted/.del-memp003.tcl~738862a9dcc682ab: Delete: storage/bdb/test/memp003.tcl
BitKeeper/deleted/.del-mpoolscript.tcl~27bfb81620657f4f: Delete: storage/bdb/test/mpoolscript.tcl
BitKeeper/deleted/.del-mutex001.tcl~3a27dee6ea8d5fff: Delete: storage/bdb/test/mutex001.tcl
BitKeeper/deleted/.del-mutex002.tcl~cc0f545c6977810e: Delete: storage/bdb/test/mutex002.tcl
BitKeeper/deleted/.del-mutex003.tcl~185695b35a871c24: Delete: storage/bdb/test/mutex003.tcl
BitKeeper/deleted/.del-mutexscript.tcl~127cfd6c6ba76bc: Delete: storage/bdb/test/mutexscript.tcl
BitKeeper/deleted/.del-ndbm.tcl~2335c936e466eb90: Delete: storage/bdb/test/ndbm.tcl
BitKeeper/deleted/.del-parallel.tcl~6b75752d884e583f: Delete: storage/bdb/test/parallel.tcl
BitKeeper/deleted/.del-recd001.tcl~955f092e1675447d: Delete: storage/bdb/test/recd001.tcl
BitKeeper/deleted/.del-recd002.tcl~64e6f3ed12e7f820: Delete: storage/bdb/test/recd002.tcl
BitKeeper/deleted/.del-recd003.tcl~d25a517848ce8210: Delete: storage/bdb/test/recd003.tcl
BitKeeper/deleted/.del-recd004.tcl~250f0c36f985a4f: Delete: storage/bdb/test/recd004.tcl
BitKeeper/deleted/.del-recd005.tcl~88455a0387834a98: Delete: storage/bdb/test/recd005.tcl
BitKeeper/deleted/.del-recd006.tcl~a2328b7fc111d419: Delete: storage/bdb/test/recd006.tcl
BitKeeper/deleted/.del-recd007.tcl~fe7a12aa6ba0702d: Delete: storage/bdb/test/recd007.tcl
BitKeeper/deleted/.del-recd008.tcl~48d3de82ee1646b0: Delete: storage/bdb/test/recd008.tcl
BitKeeper/deleted/.del-recd009.tcl~4595de558891dda1: Delete: storage/bdb/test/recd009.tcl
BitKeeper/deleted/.del-recd010.tcl~8d09d87ef031e743: Delete: storage/bdb/test/recd010.tcl
BitKeeper/deleted/.del-recd011.tcl~4926cb1abeacf65f: Delete: storage/bdb/test/recd011.tcl
BitKeeper/deleted/.del-recd012.tcl~192b59ff81537ca: Delete: storage/bdb/test/recd012.tcl
BitKeeper/deleted/.del-recd013.tcl~bf80f61e1bb44c96: Delete: storage/bdb/test/recd013.tcl
BitKeeper/deleted/.del-recd014.tcl~41b5f6072460f516: Delete: storage/bdb/test/recd014.tcl
BitKeeper/deleted/.del-recd015.tcl~b5c91e8098e824cf: Delete: storage/bdb/test/recd015.tcl
BitKeeper/deleted/.del-recd016.tcl~e4f063003eb7829c: Delete: storage/bdb/test/recd016.tcl
BitKeeper/deleted/.del-recd017.tcl~989c95e16c74a4f5: Delete: storage/bdb/test/recd017.tcl
BitKeeper/deleted/.del-recd018.tcl~bd67acd9e512019b: Delete: storage/bdb/test/recd018.tcl
BitKeeper/deleted/.del-recd019.tcl~1a3daef2f95097f3: Delete: storage/bdb/test/recd019.tcl
BitKeeper/deleted/.del-recd020.tcl~6ef83cca50c2fbf: Delete: storage/bdb/test/recd020.tcl
BitKeeper/deleted/.del-recd15scr.tcl~c8442ca1625aa1e6: Delete: storage/bdb/test/recd15scr.tcl
BitKeeper/deleted/.del-recdscript.tcl~30705cb242304a85: Delete: storage/bdb/test/recdscript.tcl
BitKeeper/deleted/.del-rep001.tcl~6639e63e9168016e: Delete: storage/bdb/test/rep001.tcl
BitKeeper/deleted/.del-rep002.tcl~59b5d22d6bb249a6: Delete: storage/bdb/test/rep002.tcl
BitKeeper/deleted/.del-rep003.tcl~d25a1ff77a550149: Delete: storage/bdb/test/rep003.tcl
BitKeeper/deleted/.del-rep004.tcl~f3c21fbb341c6b94: Delete: storage/bdb/test/rep004.tcl
BitKeeper/deleted/.del-rep005.tcl~1ad31ad578bfa51: Delete: storage/bdb/test/rep005.tcl
BitKeeper/deleted/.del-reputils.tcl~b9bcb01ffef525fb: Delete: storage/bdb/test/reputils.tcl
BitKeeper/deleted/.del-rpc001.tcl~88453ac2c774b942: Delete: storage/bdb/test/rpc001.tcl
BitKeeper/deleted/.del-rpc002.tcl~3d34fc9ac248ab8b: Delete: storage/bdb/test/rpc002.tcl
BitKeeper/deleted/.del-rpc003.tcl~5aba346f92bf16d: Delete: storage/bdb/test/rpc003.tcl
BitKeeper/deleted/.del-rpc004.tcl~163755dc13aec750: Delete: storage/bdb/test/rpc004.tcl
BitKeeper/deleted/.del-rpc005.tcl~36768d2d99f83f52: Delete: storage/bdb/test/rpc005.tcl
BitKeeper/deleted/.del-rsrc001.tcl~231d9f2ee3ace69: Delete: storage/bdb/test/rsrc001.tcl
BitKeeper/deleted/.del-rsrc002.tcl~3f58e3f58cb5c082: Delete: storage/bdb/test/rsrc002.tcl
BitKeeper/deleted/.del-rsrc003.tcl~cedc4847fcfc57bc: Delete: storage/bdb/test/rsrc003.tcl
BitKeeper/deleted/.del-rsrc004.tcl~823ee6b49ecae89b: Delete: storage/bdb/test/rsrc004.tcl
BitKeeper/deleted/.del-sdb001.tcl~e1107607ca761934: Delete: storage/bdb/test/sdb001.tcl
BitKeeper/deleted/.del-sdb002.tcl~964a21fbd96f8a22: Delete: storage/bdb/test/sdb002.tcl
BitKeeper/deleted/.del-sdb003.tcl~5e0bc4d6f3816226: Delete: storage/bdb/test/sdb003.tcl
BitKeeper/deleted/.del-sdb004.tcl~c327324b4768aed6: Delete: storage/bdb/test/sdb004.tcl
BitKeeper/deleted/.del-sdb005.tcl~db6122e3c3556632: Delete: storage/bdb/test/sdb005.tcl
BitKeeper/deleted/.del-sdb006.tcl~41b239316cafcac3: Delete: storage/bdb/test/sdb006.tcl
BitKeeper/deleted/.del-sdb007.tcl~5481a5a3b51ceeb2: Delete: storage/bdb/test/sdb007.tcl
BitKeeper/deleted/.del-sdb008.tcl~d75fbcdceb039bce: Delete: storage/bdb/test/sdb008.tcl
BitKeeper/deleted/.del-sdb009.tcl~d6859031f9bc14dc: Delete: storage/bdb/test/sdb009.tcl
BitKeeper/deleted/.del-sdb010.tcl~c256867b7db5c83b: Delete: storage/bdb/test/sdb010.tcl
BitKeeper/deleted/.del-sdb011.tcl~26a56c6b2749a162: Delete: storage/bdb/test/sdb011.tcl
BitKeeper/deleted/.del-sdb012.tcl~7f8955f99df6f9b5: Delete: storage/bdb/test/sdb012.tcl
BitKeeper/deleted/.del-sdbscript.tcl~cba1b6edd36654aa: Delete: storage/bdb/test/sdbscript.tcl
BitKeeper/deleted/.del-sdbtest001.tcl~f84b8a1c57eca170: Delete: storage/bdb/test/sdbtest001.tcl
BitKeeper/deleted/.del-sdbtest002.tcl~a924afa17f40eb2e: Delete: storage/bdb/test/sdbtest002.tcl
BitKeeper/deleted/.del-sdbutils.tcl~2805b251c9b76de3: Delete: storage/bdb/test/sdbutils.tcl
BitKeeper/deleted/.del-sec001.tcl~c82cbc2e241aa3f1: Delete: storage/bdb/test/sec001.tcl
BitKeeper/deleted/.del-sec002.tcl~9679d7e13b4c6ede: Delete: storage/bdb/test/sec002.tcl
BitKeeper/deleted/.del-shelltest.tcl~4b16ad64f199e26c: Delete: storage/bdb/test/shelltest.tcl
BitKeeper/deleted/.del-si001.tcl~75838f939815111a: Delete: storage/bdb/test/si001.tcl
BitKeeper/deleted/.del-si002.tcl~e8510b09b71e3f5: Delete: storage/bdb/test/si002.tcl
BitKeeper/deleted/.del-si003.tcl~aa00a31d48f38b81: Delete: storage/bdb/test/si003.tcl
BitKeeper/deleted/.del-si004.tcl~78eb6f226e9f34f4: Delete: storage/bdb/test/si004.tcl
BitKeeper/deleted/.del-si005.tcl~cab54cae8e9931d: Delete: storage/bdb/test/si005.tcl
BitKeeper/deleted/.del-si006.tcl~eff36e7e40ec3b4b: Delete: storage/bdb/test/si006.tcl
BitKeeper/deleted/.del-sindex.tcl~a1d08b36703dea6: Delete: storage/bdb/test/sindex.tcl
BitKeeper/deleted/.del-sysscript.tcl~1e15f1c5ff8c1f22: Delete: storage/bdb/test/sysscript.tcl
BitKeeper/deleted/.del-test.tcl~992e553a9b90e6bf: Delete: storage/bdb/test/test.tcl
BitKeeper/deleted/.del-test001.tcl~7c497849d5cc47e: Delete: storage/bdb/test/test001.tcl
BitKeeper/deleted/.del-test002.tcl~f62bf602371682f5: Delete: storage/bdb/test/test002.tcl
BitKeeper/deleted/.del-test003.tcl~2c056a0eb88120c7: Delete: storage/bdb/test/test003.tcl
BitKeeper/deleted/.del-test004.tcl~e74e086eed01b24f: Delete: storage/bdb/test/test004.tcl
BitKeeper/deleted/.del-test005.tcl~b663af0e3f63cc12: Delete: storage/bdb/test/test005.tcl
BitKeeper/deleted/.del-test006.tcl~7ef19ccd6c6981f2: Delete: storage/bdb/test/test006.tcl
BitKeeper/deleted/.del-test007.tcl~708cd48497e87fad: Delete: storage/bdb/test/test007.tcl
BitKeeper/deleted/.del-test008.tcl~b8aed254d8bffc6e: Delete: storage/bdb/test/test008.tcl
BitKeeper/deleted/.del-test009.tcl~8258b689eb65d10a: Delete: storage/bdb/test/test009.tcl
BitKeeper/deleted/.del-test010.tcl~a8916240242cdac0: Delete: storage/bdb/test/test010.tcl
BitKeeper/deleted/.del-test011.tcl~49d813ea130c63c: Delete: storage/bdb/test/test011.tcl
BitKeeper/deleted/.del-test012.tcl~3f9d19119ee4c1a6: Delete: storage/bdb/test/test012.tcl
BitKeeper/deleted/.del-test013.tcl~fc95c9707c0c9408: Delete: storage/bdb/test/test013.tcl
BitKeeper/deleted/.del-test014.tcl~1773e8eceb4d9e05: Delete: storage/bdb/test/test014.tcl
BitKeeper/deleted/.del-test015.tcl~444573fe985a9dcf: Delete: storage/bdb/test/test015.tcl
BitKeeper/deleted/.del-test016.tcl~36ba7ce481334a6: Delete: storage/bdb/test/test016.tcl
BitKeeper/deleted/.del-test017.tcl~6e285070a55a47e1: Delete: storage/bdb/test/test017.tcl
BitKeeper/deleted/.del-test018.tcl~b9dc3eb96babaa6b: Delete: storage/bdb/test/test018.tcl
BitKeeper/deleted/.del-test019.tcl~589eb8c837a35f69: Delete: storage/bdb/test/test019.tcl
BitKeeper/deleted/.del-test020.tcl~492031a3803d0ec9: Delete: storage/bdb/test/test020.tcl
BitKeeper/deleted/.del-test021.tcl~a806bd0727c6d6a4: Delete: storage/bdb/test/test021.tcl
BitKeeper/deleted/.del-test022.tcl~6010069b5dc180c0: Delete: storage/bdb/test/test022.tcl
BitKeeper/deleted/.del-test023.tcl~abe3e5ec804485c0: Delete: storage/bdb/test/test023.tcl
BitKeeper/deleted/.del-test024.tcl~76c31e5db529695c: Delete: storage/bdb/test/test024.tcl
BitKeeper/deleted/.del-test025.tcl~3514c2e3ab50d597: Delete: storage/bdb/test/test025.tcl
BitKeeper/deleted/.del-test026.tcl~e078236ba1044355: Delete: storage/bdb/test/test026.tcl
BitKeeper/deleted/.del-test027.tcl~ffa13609f8f0ffdb: Delete: storage/bdb/test/test027.tcl
BitKeeper/deleted/.del-test028.tcl~b235714f33c6ccb: Delete: storage/bdb/test/test028.tcl
BitKeeper/deleted/.del-test029.tcl~998729678d5b6299: Delete: storage/bdb/test/test029.tcl
BitKeeper/deleted/.del-test030.tcl~8fbed7506ae5f5ae: Delete: storage/bdb/test/test030.tcl
BitKeeper/deleted/.del-test031.tcl~4670fab63b50d57f: Delete: storage/bdb/test/test031.tcl
BitKeeper/deleted/.del-test032.tcl~3d301e8996def9da: Delete: storage/bdb/test/test032.tcl
BitKeeper/deleted/.del-test033.tcl~8fef4678e3dfdc1a: Delete: storage/bdb/test/test033.tcl
BitKeeper/deleted/.del-test034.tcl~6f4fc46225bdddb6: Delete: storage/bdb/test/test034.tcl
BitKeeper/deleted/.del-test035.tcl~ee79f3198de4a6c3: Delete: storage/bdb/test/test035.tcl
BitKeeper/deleted/.del-test036.tcl~70c97aa8199ce2b2: Delete: storage/bdb/test/test036.tcl
BitKeeper/deleted/.del-test037.tcl~84a15153bf328893: Delete: storage/bdb/test/test037.tcl
BitKeeper/deleted/.del-test038.tcl~4b86951797326fb6: Delete: storage/bdb/test/test038.tcl
BitKeeper/deleted/.del-test039.tcl~bbe64cdcbf284055: Delete: storage/bdb/test/test039.tcl
BitKeeper/deleted/.del-test040.tcl~186087bc93037dc8: Delete: storage/bdb/test/test040.tcl
BitKeeper/deleted/.del-test041.tcl~58dd11454418f1df: Delete: storage/bdb/test/test041.tcl
BitKeeper/deleted/.del-test042.tcl~e3abc60a16e18ea7: Delete: storage/bdb/test/test042.tcl
BitKeeper/deleted/.del-test043.tcl~2d276374e36cc44e: Delete: storage/bdb/test/test043.tcl
BitKeeper/deleted/.del-test044.tcl~edf4ad84a76362fa: Delete: storage/bdb/test/test044.tcl
BitKeeper/deleted/.del-test045.tcl~e03bd4ebab641805: Delete: storage/bdb/test/test045.tcl
BitKeeper/deleted/.del-test046.tcl~b37133329df2b0d6: Delete: storage/bdb/test/test046.tcl
BitKeeper/deleted/.del-test047.tcl~2d3f2b721d637b6a: Delete: storage/bdb/test/test047.tcl
BitKeeper/deleted/.del-test048.tcl~1ac7bb441bd5bdb6: Delete: storage/bdb/test/test048.tcl
BitKeeper/deleted/.del-test049.tcl~e158663325c08d2c: Delete: storage/bdb/test/test049.tcl
BitKeeper/deleted/.del-test050.tcl~f3703b59466a8502: Delete: storage/bdb/test/test050.tcl
BitKeeper/deleted/.del-test051.tcl~7799ccc410e40530: Delete: storage/bdb/test/test051.tcl
BitKeeper/deleted/.del-test052.tcl~238c71b3c044d73d: Delete: storage/bdb/test/test052.tcl
BitKeeper/deleted/.del-test053.tcl~c28e0d69cb1bb6bf: Delete: storage/bdb/test/test053.tcl
BitKeeper/deleted/.del-test054.tcl~f182b2517dfecc42: Delete: storage/bdb/test/test054.tcl
BitKeeper/deleted/.del-test055.tcl~e40f096f58000daa: Delete: storage/bdb/test/test055.tcl
BitKeeper/deleted/.del-test056.tcl~61f364b68fd25228: Delete: storage/bdb/test/test056.tcl
BitKeeper/deleted/.del-test057.tcl~abd7b4edceb7f3d8: Delete: storage/bdb/test/test057.tcl
BitKeeper/deleted/.del-test058.tcl~8a02c0ea44840334: Delete: storage/bdb/test/test058.tcl
BitKeeper/deleted/.del-test059.tcl~a3b61d65c0684efd: Delete: storage/bdb/test/test059.tcl
BitKeeper/deleted/.del-test060.tcl~2ba894fe978960c2: Delete: storage/bdb/test/test060.tcl
BitKeeper/deleted/.del-test061.tcl~eaf129c2ccbc3f5c: Delete: storage/bdb/test/test061.tcl
BitKeeper/deleted/.del-test062.tcl~53f36156b352c123: Delete: storage/bdb/test/test062.tcl
BitKeeper/deleted/.del-test063.tcl~b0413d6a47dd7713: Delete: storage/bdb/test/test063.tcl
BitKeeper/deleted/.del-test064.tcl~8c8d79643c127a4: Delete: storage/bdb/test/test064.tcl
BitKeeper/deleted/.del-test065.tcl~44d82ad263c529ea: Delete: storage/bdb/test/test065.tcl
BitKeeper/deleted/.del-test066.tcl~ad0ca187291e5923: Delete: storage/bdb/test/test066.tcl
BitKeeper/deleted/.del-test067.tcl~4da80670cae6f271: Delete: storage/bdb/test/test067.tcl
BitKeeper/deleted/.del-test068.tcl~a56db5bdaa6097fd: Delete: storage/bdb/test/test068.tcl
BitKeeper/deleted/.del-test069.tcl~a39feaa4f4b037c9: Delete: storage/bdb/test/test069.tcl
BitKeeper/deleted/.del-test070.tcl~35559617bc69dbdb: Delete: storage/bdb/test/test070.tcl
BitKeeper/deleted/.del-test071.tcl~6af93fa1c1ac179c: Delete: storage/bdb/test/test071.tcl
BitKeeper/deleted/.del-test072.tcl~e1630e29183f75b5: Delete: storage/bdb/test/test072.tcl
BitKeeper/deleted/.del-test073.tcl~dbb57c71d31a93e8: Delete: storage/bdb/test/test073.tcl
BitKeeper/deleted/.del-test074.tcl~9b36a93ed679ec3b: Delete: storage/bdb/test/test074.tcl
BitKeeper/deleted/.del-test075.tcl~55f0e90e9948697: Delete: storage/bdb/test/test075.tcl
BitKeeper/deleted/.del-test076.tcl~5ec9f2c249cd7d6c: Delete: storage/bdb/test/test076.tcl
BitKeeper/deleted/.del-test077.tcl~920463fe1710f362: Delete: storage/bdb/test/test077.tcl
BitKeeper/deleted/.del-test078.tcl~e968797716541059: Delete: storage/bdb/test/test078.tcl
BitKeeper/deleted/.del-test079.tcl~dab9573083074ebd: Delete: storage/bdb/test/test079.tcl
BitKeeper/deleted/.del-test080.tcl~fdeda1207812183a: Delete: storage/bdb/test/test080.tcl
BitKeeper/deleted/.del-test081.tcl~965a56e3a45b064e: Delete: storage/bdb/test/test081.tcl
BitKeeper/deleted/.del-test082.tcl~9e31d460450e1eca: Delete: storage/bdb/test/test082.tcl
BitKeeper/deleted/.del-test083.tcl~14a579863e09cadc: Delete: storage/bdb/test/test083.tcl
BitKeeper/deleted/.del-test084.tcl~b4ef1538a604bfb2: Delete: storage/bdb/test/test084.tcl
BitKeeper/deleted/.del-test085.tcl~49a7f28eb2f8d1c1: Delete: storage/bdb/test/test085.tcl
BitKeeper/deleted/.del-test086.tcl~6a011092ed6492ab: Delete: storage/bdb/test/test086.tcl
BitKeeper/deleted/.del-test087.tcl~67ffc668cffee04c: Delete: storage/bdb/test/test087.tcl
BitKeeper/deleted/.del-test088.tcl~189537e912432d04: Delete: storage/bdb/test/test088.tcl
BitKeeper/deleted/.del-test089.tcl~4d777a6fab77ee5f: Delete: storage/bdb/test/test089.tcl
BitKeeper/deleted/.del-test090.tcl~df90cf443e7fd392: Delete: storage/bdb/test/test090.tcl
BitKeeper/deleted/.del-test091.tcl~7b3a2c8871cf0b93: Delete: storage/bdb/test/test091.tcl
BitKeeper/deleted/.del-test092.tcl~1ca87063b7aae056: Delete: storage/bdb/test/test092.tcl
BitKeeper/deleted/.del-test093.tcl~58abaa0e370f696d: Delete: storage/bdb/test/test093.tcl
BitKeeper/deleted/.del-test094.tcl~ac0a4b615a6be00d: Delete: storage/bdb/test/test094.tcl
BitKeeper/deleted/.del-test095.tcl~65633e864e895773: Delete: storage/bdb/test/test095.tcl
BitKeeper/deleted/.del-test096.tcl~a4259e5794261692: Delete: storage/bdb/test/test096.tcl
BitKeeper/deleted/.del-test097.tcl~59915885eff391d9: Delete: storage/bdb/test/test097.tcl
BitKeeper/deleted/.del-test098.tcl~421438a23fdc8da1: Delete: storage/bdb/test/test098.tcl
BitKeeper/deleted/.del-test099.tcl~ecb60f71caa0104e: Delete: storage/bdb/test/test099.tcl
BitKeeper/deleted/.del-test100.tcl~cbf3c2ab49a445: Delete: storage/bdb/test/test100.tcl
BitKeeper/deleted/.del-test101.tcl~2a7ed4a3bb8a48a3: Delete: storage/bdb/test/test101.tcl
BitKeeper/deleted/.del-testparams.tcl~1b39050ecb624442: Delete: storage/bdb/test/testparams.tcl
BitKeeper/deleted/.del-testutils.tcl~9e77f5853c538eaf: Delete: storage/bdb/test/testutils.tcl
BitKeeper/deleted/.del-txn001.tcl~affc012a3da3cc8a: Delete: storage/bdb/test/txn001.tcl
BitKeeper/deleted/.del-txn002.tcl~42f2b0fa86e4a807: Delete: storage/bdb/test/txn002.tcl
BitKeeper/deleted/.del-txn003.tcl~41bd535632d28944: Delete: storage/bdb/test/txn003.tcl
BitKeeper/deleted/.del-txn004.tcl~36e409a3c47a6380: Delete: storage/bdb/test/txn004.tcl
BitKeeper/deleted/.del-txn005.tcl~c7c258c01a825f9f: Delete: storage/bdb/test/txn005.tcl
BitKeeper/deleted/.del-txn006.tcl~33fb6688c9cb2394: Delete: storage/bdb/test/txn006.tcl
BitKeeper/deleted/.del-txn007.tcl~1e55f1c1b493abb3: Delete: storage/bdb/test/txn007.tcl
BitKeeper/deleted/.del-txn008.tcl~8d3b828ad12cda69: Delete: storage/bdb/test/txn008.tcl
BitKeeper/deleted/.del-txn009.tcl~c57ad8934bc8cc8b: Delete: storage/bdb/test/txn009.tcl
BitKeeper/deleted/.del-txnscript.tcl~fea6f1c6aa45aa4: Delete: storage/bdb/test/txnscript.tcl
BitKeeper/deleted/.del-update.tcl~eb74470da58e545a: Delete: storage/bdb/test/update.tcl
BitKeeper/deleted/.del-upgrade.tcl~9aeaf897775dec6e: Delete: storage/bdb/test/upgrade.tcl
BitKeeper/deleted/.del-wordlist~153352d7533f7f60: Delete: storage/bdb/test/wordlist
BitKeeper/deleted/.del-wrap.tcl~89400388e5b234c2: Delete: storage/bdb/test/wrap.tcl
BitKeeper/deleted/.del-chk.code~4c8908261a6053fd: Delete: storage/bdb/test/scr001/chk.code
BitKeeper/deleted/.del-chk.def~c9d08d27a526d44b: Delete: storage/bdb/test/scr002/chk.def
BitKeeper/deleted/.del-chk.define~2a0fb00fc4ed968e: Delete: storage/bdb/test/scr003/chk.define
BitKeeper/deleted/.del-chk.javafiles~9e3dd374dfb0aa3a: Delete: storage/bdb/test/scr004/chk.javafiles
BitKeeper/deleted/.del-chk.nl~70b4e5b798c753ed: Delete: storage/bdb/test/scr005/chk.nl
BitKeeper/deleted/.del-chk.offt~8a3c305110b12735: Delete: storage/bdb/test/scr006/chk.offt
BitKeeper/deleted/.del-chk.proto~d1923cdd90414e56: Delete: storage/bdb/test/scr007/chk.proto
BitKeeper/deleted/.del-chk.pubdef~bd699d2321166d30: Delete: storage/bdb/test/scr008/chk.pubdef
BitKeeper/deleted/.del-chk.srcfiles~d4d837e36a9ba9f1: Delete: storage/bdb/test/scr009/chk.srcfiles
BitKeeper/deleted/.del-chk.str~cb7fd8a4f2a1fad1: Delete: storage/bdb/test/scr010/chk.str
BitKeeper/deleted/.del-spell.ok~5d9d5e442e5ebf62: Delete: storage/bdb/test/scr010/spell.ok
BitKeeper/deleted/.del-chk.tags~8ee11b591e1a18fe: Delete: storage/bdb/test/scr011/chk.tags
BitKeeper/deleted/.del-chk.stats~38fd26c0ece7131d: Delete: storage/bdb/test/scr013/chk.stats
BitKeeper/deleted/.del-chk.vx_code~ec114356750bc985: Delete: storage/bdb/test/scr012/chk.vx_code
BitKeeper/deleted/.del-chk.err~ca4595c9dbc93f1b: Delete: storage/bdb/test/scr014/chk.err
BitKeeper/deleted/.del-README~29417fd1ec6b2679: Delete: storage/bdb/test/scr015/README
BitKeeper/deleted/.del-TestConstruct01.cpp~425c78577f8f28c8: Delete: storage/bdb/test/scr015/TestConstruct01.cpp
BitKeeper/deleted/.del-TestConstruct01.testerr~6e4e36b2dc63c283: Delete: storage/bdb/test/scr015/TestConstruct01.testerr
BitKeeper/deleted/.del-TestConstruct01.testout~5e8776554283a4c4: Delete: storage/bdb/test/scr015/TestConstruct01.testout
BitKeeper/deleted/.del-TestExceptInclude.cpp~67d314a055e90bf3: Delete: storage/bdb/test/scr015/TestExceptInclude.cpp
BitKeeper/deleted/.del-TestGetSetMethods.cpp~ce02cae6e67225d: Delete: storage/bdb/test/scr015/TestGetSetMethods.cpp
BitKeeper/deleted/.del-TestKeyRange.cpp~9b80f48a5174a5c9: Delete: storage/bdb/test/scr015/TestKeyRange.cpp
BitKeeper/deleted/.del-TestKeyRange.testin~67ca6fe17815d21b: Delete: storage/bdb/test/scr015/TestKeyRange.testin
BitKeeper/deleted/.del-TestKeyRange.testout~5bcbbf21d18eff: Delete: storage/bdb/test/scr015/TestKeyRange.testout
BitKeeper/deleted/.del-TestLogc.cpp~bad0027b7264e53e: Delete: storage/bdb/test/scr015/TestLogc.cpp
BitKeeper/deleted/.del-TestLogc.testout~a4609b75a4779f43: Delete: storage/bdb/test/scr015/TestLogc.testout
BitKeeper/deleted/.del-TestSimpleAccess.cpp~bd7c1d1d12b641d: Delete: storage/bdb/test/scr015/TestSimpleAccess.cpp
BitKeeper/deleted/.del-TestSimpleAccess.testout~5efeed9c81b9bc27: Delete: storage/bdb/test/scr015/TestSimpleAccess.testout
BitKeeper/deleted/.del-TestTruncate.cpp~25ed0d942f0a5369: Delete: storage/bdb/test/scr015/TestTruncate.cpp
BitKeeper/deleted/.del-TestTruncate.testout~ccbc326c37208fa9: Delete: storage/bdb/test/scr015/TestTruncate.testout
BitKeeper/deleted/.del-chk.cxxtests~c40a22d26ccf756a: Delete: storage/bdb/test/scr015/chk.cxxtests
BitKeeper/deleted/.del-ignore~a29a88595efdfb44: Delete: storage/bdb/test/scr015/ignore
BitKeeper/deleted/.del-testall~a38a540fd662e4b: Delete: storage/bdb/test/scr015/testall
BitKeeper/deleted/.del-testone~2611e5ac141df9e: Delete: storage/bdb/test/scr015/testone
BitKeeper/deleted/.del-CallbackTest.java~cbfa27249d061cfb: Delete: storage/bdb/test/scr016/CallbackTest.java
BitKeeper/deleted/.del-CallbackTest.testout~71e902a33379ba52: Delete: storage/bdb/test/scr016/CallbackTest.testout
BitKeeper/deleted/.del-README~c0e58cbcc5c57ddc: Delete: storage/bdb/test/scr016/README
BitKeeper/deleted/.del-TestAppendRecno.java~8ad6fa89c4cff7cd: Delete: storage/bdb/test/scr016/TestAppendRecno.java
BitKeeper/deleted/.del-TestAppendRecno.testout~43c535efe914c498: Delete: storage/bdb/test/scr016/TestAppendRecno.testout
BitKeeper/deleted/.del-TestAssociate.java~cd029537c3af5252: Delete: storage/bdb/test/scr016/TestAssociate.java
BitKeeper/deleted/.del-TestAssociate.testout~261a98eaac389069: Delete: storage/bdb/test/scr016/TestAssociate.testout
BitKeeper/deleted/.del-TestClosedDb.java~6f99db2fb2fa2d15: Delete: storage/bdb/test/scr016/TestClosedDb.java
BitKeeper/deleted/.del-TestClosedDb.testout~46a360b011d96925: Delete: storage/bdb/test/scr016/TestClosedDb.testout
BitKeeper/deleted/.del-TestConstruct01.java~8b68df1abcf4843: Delete: storage/bdb/test/scr016/TestConstruct01.java
BitKeeper/deleted/.del-TestConstruct01.testerr~8be3723f205f31dd: Delete: storage/bdb/test/scr016/TestConstruct01.testerr
BitKeeper/deleted/.del-TestConstruct01.testout~59baf46019af0f93: Delete: storage/bdb/test/scr016/TestConstruct01.testout
BitKeeper/deleted/.del-TestConstruct02.java~7629249c946fe58f: Delete: storage/bdb/test/scr016/TestConstruct02.java
BitKeeper/deleted/.del-TestConstruct02.testout~129a58ccfb4f9868: Delete: storage/bdb/test/scr016/TestConstruct02.testout
BitKeeper/deleted/.del-TestDbtFlags.java~6da39a3c264c5e1: Delete: storage/bdb/test/scr016/TestDbtFlags.java
BitKeeper/deleted/.del-TestDbtFlags.testerr~bf1b8223db949631: Delete: storage/bdb/test/scr016/TestDbtFlags.testerr
BitKeeper/deleted/.del-TestDbtFlags.testout~a104075868b55448: Delete: storage/bdb/test/scr016/TestDbtFlags.testout
BitKeeper/deleted/.del-TestGetSetMethods.java~ce458701b96fbae5: Delete: storage/bdb/test/scr016/TestGetSetMethods.java
BitKeeper/deleted/.del-TestKeyRange.java~bbedf620a38ba528: Delete: storage/bdb/test/scr016/TestKeyRange.java
BitKeeper/deleted/.del-TestKeyRange.testout~bea8cd86cb028218: Delete: storage/bdb/test/scr016/TestKeyRange.testout
BitKeeper/deleted/.del-TestLockVec.java~f2a881f24fcd0a3d: Delete: storage/bdb/test/scr016/TestLockVec.java
BitKeeper/deleted/.del-TestLockVec.testout~100308b4d8ffe94d: Delete: storage/bdb/test/scr016/TestLockVec.testout
BitKeeper/deleted/.del-TestLogc.java~73a824d2d67a26d: Delete: storage/bdb/test/scr016/TestLogc.java
BitKeeper/deleted/.del-TestLogc.testout~b3a920723db1fdaf: Delete: storage/bdb/test/scr016/TestLogc.testout
BitKeeper/deleted/.del-TestOpenEmpty.java~ca2c539e136b83f4: Delete: storage/bdb/test/scr016/TestOpenEmpty.java
BitKeeper/deleted/.del-TestOpenEmpty.testerr~94241960bab03730: Delete: storage/bdb/test/scr016/TestOpenEmpty.testerr
BitKeeper/deleted/.del-TestReplication.java~2007f084cfd99774: Delete: storage/bdb/test/scr016/TestReplication.java
BitKeeper/deleted/.del-TestRpcServer.java~136338ae7114729e: Delete: storage/bdb/test/scr016/TestRpcServer.java
BitKeeper/deleted/.del-TestSameDbt.java~6b58f6d4667df271: Delete: storage/bdb/test/scr016/TestSameDbt.java
BitKeeper/deleted/.del-TestSameDbt.testout~32315254d8be8d41: Delete: storage/bdb/test/scr016/TestSameDbt.testout
BitKeeper/deleted/.del-TestSimpleAccess.java~9d44ee26d00dabcf: Delete: storage/bdb/test/scr016/TestSimpleAccess.java
BitKeeper/deleted/.del-TestSimpleAccess.testout~c93b176ca5d121fa: Delete: storage/bdb/test/scr016/TestSimpleAccess.testout
BitKeeper/deleted/.del-TestStat.java~b3254049c84f2e89: Delete: storage/bdb/test/scr016/TestStat.java
BitKeeper/deleted/.del-TestStat.testout~59276cedffc035: Delete: storage/bdb/test/scr016/TestStat.testout
BitKeeper/deleted/.del-TestTruncate.java~5ffd51efb9a0f54c: Delete: storage/bdb/test/scr016/TestTruncate.java
BitKeeper/deleted/.del-TestTruncate.testout~1078a6e877e3e063: Delete: storage/bdb/test/scr016/TestTruncate.testout
BitKeeper/deleted/.del-TestUtil.java~52eef127e5085616: Delete: storage/bdb/test/scr016/TestUtil.java
BitKeeper/deleted/.del-TestXAServlet.java~aa3f0d373e0305dc: Delete: storage/bdb/test/scr016/TestXAServlet.java
BitKeeper/deleted/.del-chk.javatests~76196d06146273e8: Delete: storage/bdb/test/scr016/chk.javatests
BitKeeper/deleted/.del-ignore~2d9ec9d6d8c49ffe: Delete: storage/bdb/test/scr016/ignore
BitKeeper/deleted/.del-testall~eba2218c5e16bb31: Delete: storage/bdb/test/scr016/testall
BitKeeper/deleted/.del-testone~79a7cbbc69ecdfe1: Delete: storage/bdb/test/scr016/testone
BitKeeper/deleted/.del-O.BH~84b85479f477037b: Delete: storage/bdb/test/scr017/O.BH
BitKeeper/deleted/.del-O.R~efc67c567acd7ee8: Delete: storage/bdb/test/scr017/O.R
BitKeeper/deleted/.del-chk.db185~13e5ae02d2f2952f: Delete: storage/bdb/test/scr017/chk.db185
BitKeeper/deleted/.del-t.c~b59e28ce10263d8b: Delete: storage/bdb/test/scr017/t.c
BitKeeper/deleted/.del-chk.comma~f4c73d979215e8d9: Delete: storage/bdb/test/scr018/chk.comma
BitKeeper/deleted/.del-t.c~4c80ca5b22f28809: Delete: storage/bdb/test/scr018/t.c
BitKeeper/deleted/.del-chk.include~6ccbc39213db1cfc: Delete: storage/bdb/test/scr019/chk.include
BitKeeper/deleted/.del-chk.flags~d3427093f84cf9cb: Delete: storage/bdb/test/scr021/chk.flags
BitKeeper/deleted/.del-chk.inc~4411d703b39a2d46: Delete: storage/bdb/test/scr020/chk.inc
BitKeeper/deleted/.del-chk.rr~f562abf3c6627d13: Delete: storage/bdb/test/scr022/chk.rr
BitKeeper/etc/ignore: Update ignore list for bdb
storage/bdb/LICENSE: Import new version
storage/bdb/btree/bt_compare.c: Import new version
storage/bdb/btree/bt_conv.c: Import new version
storage/bdb/btree/bt_curadj.c: Import new version
storage/bdb/btree/bt_cursor.c: Import new version
storage/bdb/btree/bt_delete.c: Import new version
storage/bdb/btree/bt_method.c: Import new version
storage/bdb/btree/bt_open.c: Import new version
storage/bdb/btree/bt_put.c: Import new version
storage/bdb/btree/bt_rec.c: Import new version
storage/bdb/btree/bt_reclaim.c: Import new version
storage/bdb/btree/bt_recno.c: Import new version
storage/bdb/btree/bt_rsearch.c: Import new version
storage/bdb/btree/bt_search.c: Import new version
storage/bdb/btree/bt_split.c: Import new version
storage/bdb/btree/bt_stat.c: Import new version
storage/bdb/btree/bt_upgrade.c: Import new version
storage/bdb/btree/bt_verify.c: Import new version
storage/bdb/btree/btree.src: Import new version
storage/bdb/build_win32/Berkeley_DB.dsw: Import new version
storage/bdb/build_win32/app_dsp.src: Import new version
storage/bdb/build_win32/dbkill.cpp: Import new version
storage/bdb/build_win32/dynamic_dsp.src: Import new version
storage/bdb/build_win32/libdb_tcl.def: Import new version
storage/bdb/build_win32/libdbrc.src: Import new version
storage/bdb/build_win32/static_dsp.src: Import new version
storage/bdb/clib/getcwd.c: Import new version
storage/bdb/clib/getopt.c: Import new version
storage/bdb/clib/memcmp.c: Import new version
storage/bdb/clib/memmove.c: Import new version
storage/bdb/clib/raise.c: Import new version
storage/bdb/clib/snprintf.c: Import new version
storage/bdb/clib/strcasecmp.c: Import new version
storage/bdb/clib/strdup.c: Import new version
storage/bdb/clib/strerror.c: Import new version
storage/bdb/clib/strtol.c: Import new version
storage/bdb/clib/strtoul.c: Import new version
storage/bdb/common/crypto_stub.c: Import new version
storage/bdb/common/db_byteorder.c: Import new version
storage/bdb/common/db_err.c: Import new version
storage/bdb/common/db_getlong.c: Import new version
storage/bdb/common/db_idspace.c: Import new version
storage/bdb/common/db_log2.c: Import new version
storage/bdb/common/util_arg.c: Import new version
storage/bdb/common/util_cache.c: Import new version
storage/bdb/common/util_log.c: Import new version
storage/bdb/common/util_sig.c: Import new version
storage/bdb/crypto/aes_method.c: Import new version
storage/bdb/crypto/crypto.c: Import new version
storage/bdb/crypto/crypto.html: Import new version
storage/bdb/crypto/mersenne/mt19937db.c: Import new version
storage/bdb/crypto/rijndael/rijndael-alg-fst.h: Import new version
storage/bdb/crypto/rijndael/rijndael-api-fst.h: Import new version
storage/bdb/cxx/cxx_db.cpp: Import new version
storage/bdb/cxx/cxx_dbc.cpp: Import new version
storage/bdb/cxx/cxx_dbt.cpp: Import new version
storage/bdb/cxx/cxx_env.cpp: Import new version
storage/bdb/cxx/cxx_except.cpp: Import new version
storage/bdb/cxx/cxx_lock.cpp: Import new version
storage/bdb/cxx/cxx_logc.cpp: Import new version
storage/bdb/cxx/cxx_mpool.cpp: Import new version
storage/bdb/cxx/cxx_multi.cpp: Import new version
storage/bdb/cxx/cxx_seq.cpp: Import new version
storage/bdb/cxx/cxx_txn.cpp: Import new version
storage/bdb/db/crdel.src: Import new version
storage/bdb/db/crdel_rec.c: Import new version
storage/bdb/db/db.c: Import new version
storage/bdb/db/db.src: Import new version
storage/bdb/db/db_am.c: Import new version
storage/bdb/db/db_cam.c: Import new version
storage/bdb/db/db_conv.c: Import new version
storage/bdb/db/db_dispatch.c: Import new version
storage/bdb/db/db_dup.c: Import new version
storage/bdb/db/db_iface.c: Import new version
storage/bdb/db/db_join.c: Import new version
storage/bdb/db/db_meta.c: Import new version
storage/bdb/db/db_method.c: Import new version
storage/bdb/db/db_open.c: Import new version
storage/bdb/db/db_overflow.c: Import new version
storage/bdb/db/db_ovfl_vrfy.c: Import new version
storage/bdb/db/db_pr.c: Import new version
storage/bdb/db/db_rec.c: Import new version
storage/bdb/db/db_reclaim.c: Import new version
storage/bdb/db/db_remove.c: Import new version
storage/bdb/db/db_rename.c: Import new version
storage/bdb/db/db_ret.c: Import new version
storage/bdb/db/db_setid.c: Import new version
storage/bdb/db/db_setlsn.c: Import new version
storage/bdb/db/db_stati.c: Import new version
storage/bdb/db/db_truncate.c: Import new version
storage/bdb/db/db_upg.c: Import new version
storage/bdb/db/db_upg_opd.c: Import new version storage/bdb/db/db_vrfy.c: Import new version storage/bdb/db/db_vrfy_stub.c: Import new version storage/bdb/db/db_vrfyutil.c: Import new version storage/bdb/db185/db185.c: Import new version storage/bdb/db185/db185_int.in: Import new version storage/bdb/db_archive/db_archive.c: Import new version storage/bdb/db_checkpoint/db_checkpoint.c: Import new version storage/bdb/db_deadlock/db_deadlock.c: Import new version storage/bdb/db_dump/db_dump.c: Import new version storage/bdb/db_dump185/db_dump185.c: Import new version storage/bdb/db_load/db_load.c: Import new version storage/bdb/db_printlog/README: Import new version storage/bdb/db_printlog/commit.awk: Import new version storage/bdb/db_printlog/count.awk: Import new version storage/bdb/db_printlog/db_printlog.c: Import new version storage/bdb/db_printlog/dbname.awk: Import new version storage/bdb/db_printlog/fileid.awk: Import new version storage/bdb/db_printlog/logstat.awk: Import new version storage/bdb/db_printlog/pgno.awk: Import new version storage/bdb/db_printlog/range.awk: Import new version storage/bdb/db_printlog/rectype.awk: Import new version storage/bdb/db_printlog/status.awk: Import new version storage/bdb/db_printlog/txn.awk: Import new version storage/bdb/db_recover/db_recover.c: Import new version storage/bdb/db_stat/db_stat.c: Import new version storage/bdb/db_stat/dd.sh: Import new version storage/bdb/db_upgrade/db_upgrade.c: Import new version storage/bdb/db_verify/db_verify.c: Import new version storage/bdb/dbinc/btree.h: Import new version storage/bdb/dbinc/crypto.h: Import new version storage/bdb/dbinc/cxx_int.h: Import new version storage/bdb/dbinc/db.in: Import new version storage/bdb/dbinc/db_185.in: Import new version storage/bdb/dbinc/db_am.h: Import new version storage/bdb/dbinc/db_cxx.in: Import new version storage/bdb/dbinc/db_dispatch.h: Import new version storage/bdb/dbinc/db_int.in: Import new version storage/bdb/dbinc/db_join.h: Import new version storage/bdb/dbinc/db_page.h: Import new version storage/bdb/dbinc/db_server_int.h: Import new version storage/bdb/dbinc/db_shash.h: Import new version storage/bdb/dbinc/db_swap.h: Import new version storage/bdb/dbinc/db_upgrade.h: Import new version storage/bdb/dbinc/db_verify.h: Import new version storage/bdb/dbinc/debug.h: Import new version storage/bdb/dbinc/fop.h: Import new version storage/bdb/dbinc/globals.h: Import new version storage/bdb/dbinc/hash.h: Import new version storage/bdb/dbinc/hmac.h: Import new version storage/bdb/dbinc/lock.h: Import new version storage/bdb/dbinc/log.h: Import new version storage/bdb/dbinc/mp.h: Import new version storage/bdb/dbinc/mutex.h: Import new version storage/bdb/dbinc/os.h: Import new version storage/bdb/dbinc/qam.h: Import new version storage/bdb/dbinc/queue.h: Import new version storage/bdb/dbinc/region.h: Import new version storage/bdb/dbinc/rep.h: Import new version storage/bdb/dbinc/shqueue.h: Import new version storage/bdb/dbinc/tcl_db.h: Import new version storage/bdb/dbinc/txn.h: Import new version storage/bdb/dbinc/xa.h: Import new version storage/bdb/dbm/dbm.c: Import new version storage/bdb/dbreg/dbreg.c: Import new version storage/bdb/dbreg/dbreg.src: Import new version storage/bdb/dbreg/dbreg_rec.c: Import new version storage/bdb/dbreg/dbreg_stat.c: Import new version storage/bdb/dbreg/dbreg_util.c: Import new version storage/bdb/dist/Makefile.in: Import new version storage/bdb/dist/RELEASE: Import new version storage/bdb/dist/aclocal/libtool.ac: Import new 
version storage/bdb/dist/aclocal/mutex.ac: Import new version storage/bdb/dist/aclocal/options.ac: Import new version storage/bdb/dist/aclocal/programs.ac: Import new version storage/bdb/dist/aclocal/rpc.ac: Import new version storage/bdb/dist/aclocal/sequence.ac: Import new version storage/bdb/dist/aclocal/sosuffix.ac: Import new version storage/bdb/dist/aclocal/tcl.ac: Import new version storage/bdb/dist/aclocal/types.ac: Import new version storage/bdb/dist/aclocal_java/ac_check_class.ac: Import new version storage/bdb/dist/aclocal_java/ac_check_classpath.ac: Import new version storage/bdb/dist/aclocal_java/ac_check_junit.ac: Import new version storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac: Import new version storage/bdb/dist/aclocal_java/ac_java_options.ac: Import new version storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac: Import new version storage/bdb/dist/aclocal_java/ac_prog_jar.ac: Import new version storage/bdb/dist/aclocal_java/ac_prog_java.ac: Import new version storage/bdb/dist/aclocal_java/ac_prog_java_works.ac: Import new version storage/bdb/dist/aclocal_java/ac_prog_javac.ac: Import new version storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac: Import new version storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac: Import new version storage/bdb/dist/aclocal_java/ac_prog_javah.ac: Import new version storage/bdb/dist/aclocal_java/ac_try_compile_java.ac: Import new version storage/bdb/dist/aclocal_java/ac_try_run_javac.ac: Import new version storage/bdb/dist/buildrel: Import new version storage/bdb/dist/config.guess: Import new version storage/bdb/dist/config.sub: Import new version storage/bdb/dist/configure.ac: Import new version storage/bdb/dist/gen_rec.awk: Import new version storage/bdb/dist/gen_rpc.awk: Import new version storage/bdb/dist/ltmain.sh: Import new version storage/bdb/dist/pubdef.in: Import new version storage/bdb/dist/s_all: Import new version storage/bdb/dist/s_config: Import new version storage/bdb/dist/s_crypto: Import new version storage/bdb/dist/s_include: Import new version storage/bdb/dist/s_java_const: Import new version storage/bdb/dist/s_java_stat: Import new version storage/bdb/dist/s_java_swig: Import new version storage/bdb/dist/s_java: Import new version storage/bdb/dist/s_je2db: Import new version storage/bdb/dist/s_perm: Import new version storage/bdb/dist/s_readme: Import new version storage/bdb/dist/s_recover: Import new version storage/bdb/dist/s_rpc: Import new version storage/bdb/dist/s_symlink: Import new version storage/bdb/dist/s_tags: Import new version storage/bdb/dist/s_test: Import new version storage/bdb/dist/s_vxworks: Import new version storage/bdb/dist/s_win32_dsp: Import new version storage/bdb/dist/s_win32: Import new version storage/bdb/dist/s_winmsi: Import new version storage/bdb/dist/srcfiles.in: Import new version storage/bdb/dist/vx_buildcd: Import new version storage/bdb/dist/vx_config.in: Import new version storage/bdb/dist/vx_setup/LICENSE.TXT: Import new version storage/bdb/dist/win_config.in: Import new version storage/bdb/dist/win_db.in: Import new version storage/bdb/dist/win_exports.in: Import new version storage/bdb/dist/winmsi/files.in: Import new version storage/bdb/dist/winmsi/links.in: Import new version storage/bdb/dist/winmsi/s_winmsi.fcn: Import new version storage/bdb/dist/winmsi/winbuild.bat: Import new version storage/bdb/env/db_salloc.c: Import new version storage/bdb/env/db_shash.c: Import new version storage/bdb/env/env_file.c: Import new version storage/bdb/env/env_method.c: Import 
new version storage/bdb/env/env_open.c: Import new version storage/bdb/env/env_recover.c: Import new version storage/bdb/env/env_region.c: Import new version storage/bdb/env/env_stat.c: Import new version storage/bdb/fileops/fileops.src: Import new version storage/bdb/fileops/fop_basic.c: Import new version storage/bdb/fileops/fop_rec.c: Import new version storage/bdb/fileops/fop_util.c: Import new version storage/bdb/hash/hash.c: Import new version storage/bdb/hash/hash.src: Import new version storage/bdb/hash/hash_conv.c: Import new version storage/bdb/hash/hash_dup.c: Import new version storage/bdb/hash/hash_func.c: Import new version storage/bdb/hash/hash_meta.c: Import new version storage/bdb/hash/hash_method.c: Import new version storage/bdb/hash/hash_open.c: Import new version storage/bdb/hash/hash_page.c: Import new version storage/bdb/hash/hash_rec.c: Import new version storage/bdb/hash/hash_reclaim.c: Import new version storage/bdb/hash/hash_stat.c: Import new version storage/bdb/hash/hash_stub.c: Import new version storage/bdb/hash/hash_upgrade.c: Import new version storage/bdb/hash/hash_verify.c: Import new version storage/bdb/hmac/hmac.c: Import new version storage/bdb/hmac/sha1.c: Import new version storage/bdb/hsearch/hsearch.c: Import new version storage/bdb/lock/Design: Import new version storage/bdb/lock/lock.c: Import new version storage/bdb/lock/lock_deadlock.c: Import new version storage/bdb/lock/lock_id.c: Import new version storage/bdb/lock/lock_list.c: Import new version storage/bdb/lock/lock_method.c: Import new version storage/bdb/lock/lock_region.c: Import new version storage/bdb/lock/lock_stat.c: Import new version storage/bdb/lock/lock_timer.c: Import new version storage/bdb/lock/lock_util.c: Import new version storage/bdb/log/log.c: Import new version storage/bdb/log/log_archive.c: Import new version storage/bdb/log/log_compare.c: Import new version storage/bdb/log/log_get.c: Import new version storage/bdb/log/log_method.c: Import new version storage/bdb/log/log_put.c: Import new version storage/bdb/log/log_stat.c: Import new version storage/bdb/mp/mp_alloc.c: Import new version storage/bdb/mp/mp_bh.c: Import new version storage/bdb/mp/mp_fget.c: Import new version storage/bdb/mp/mp_fmethod.c: Import new version storage/bdb/mp/mp_fopen.c: Import new version storage/bdb/mp/mp_fput.c: Import new version storage/bdb/mp/mp_fset.c: Import new version storage/bdb/mp/mp_method.c: Import new version storage/bdb/mp/mp_region.c: Import new version storage/bdb/mp/mp_register.c: Import new version storage/bdb/mp/mp_stat.c: Import new version storage/bdb/mp/mp_sync.c: Import new version storage/bdb/mp/mp_trickle.c: Import new version storage/bdb/mutex/README: Import new version storage/bdb/mutex/mut_fcntl.c: Import new version storage/bdb/mutex/mut_pthread.c: Import new version storage/bdb/mutex/mut_tas.c: Import new version storage/bdb/mutex/mut_win32.c: Import new version storage/bdb/mutex/tm.c: Import new version storage/bdb/mutex/uts4_cc.s: Import new version storage/bdb/os/os_abs.c: Import new version storage/bdb/os/os_alloc.c: Import new version storage/bdb/os/os_clock.c: Import new version storage/bdb/os/os_config.c: Import new version storage/bdb/os/os_dir.c: Import new version storage/bdb/os/os_errno.c: Import new version storage/bdb/os/os_fid.c: Import new version storage/bdb/os/os_fsync.c: Import new version storage/bdb/os/os_handle.c: Import new version storage/bdb/os/os_id.c: Import new version storage/bdb/os/os_map.c: Import new version 
storage/bdb/os/os_method.c: Import new version storage/bdb/os/os_oflags.c: Import new version storage/bdb/os/os_open.c: Import new version storage/bdb/os/os_region.c: Import new version storage/bdb/os/os_rename.c: Import new version storage/bdb/os/os_root.c: Import new version storage/bdb/os/os_rpath.c: Import new version storage/bdb/os/os_rw.c: Import new version storage/bdb/os/os_seek.c: Import new version storage/bdb/os/os_sleep.c: Import new version storage/bdb/os/os_spin.c: Import new version storage/bdb/os/os_stat.c: Import new version storage/bdb/os/os_tmpdir.c: Import new version storage/bdb/os/os_truncate.c: Import new version storage/bdb/os/os_unlink.c: Import new version storage/bdb/os_win32/os_abs.c: Import new version storage/bdb/os_win32/os_clock.c: Import new version storage/bdb/os_win32/os_config.c: Import new version storage/bdb/os_win32/os_dir.c: Import new version storage/bdb/os_win32/os_errno.c: Import new version storage/bdb/os_win32/os_fid.c: Import new version storage/bdb/os_win32/os_fsync.c: Import new version storage/bdb/os_win32/os_handle.c: Import new version storage/bdb/os_win32/os_map.c: Import new version storage/bdb/os_win32/os_open.c: Import new version storage/bdb/os_win32/os_rename.c: Import new version storage/bdb/os_win32/os_rw.c: Import new version storage/bdb/os_win32/os_seek.c: Import new version storage/bdb/os_win32/os_sleep.c: Import new version storage/bdb/os_win32/os_spin.c: Import new version storage/bdb/os_win32/os_stat.c: Import new version storage/bdb/os_win32/os_truncate.c: Import new version storage/bdb/os_win32/os_unlink.c: Import new version storage/bdb/qam/qam.c: Import new version storage/bdb/qam/qam.src: Import new version storage/bdb/qam/qam_conv.c: Import new version storage/bdb/qam/qam_files.c: Import new version storage/bdb/qam/qam_method.c: Import new version storage/bdb/qam/qam_open.c: Import new version storage/bdb/qam/qam_rec.c: Import new version storage/bdb/qam/qam_stat.c: Import new version storage/bdb/qam/qam_stub.c: Import new version storage/bdb/qam/qam_upgrade.c: Import new version storage/bdb/qam/qam_verify.c: Import new version storage/bdb/rep/rep.src: Import new version storage/bdb/rep/rep_backup.c: Import new version storage/bdb/rep/rep_method.c: Import new version storage/bdb/rep/rep_record.c: Import new version storage/bdb/rep/rep_region.c: Import new version storage/bdb/rep/rep_stat.c: Import new version storage/bdb/rep/rep_stub.c: Import new version storage/bdb/rep/rep_util.c: Import new version storage/bdb/sequence/seq_stat.c: Import new version storage/bdb/sequence/sequence.c: Import new version storage/bdb/txn/txn.c: Import new version storage/bdb/txn/txn.src: Import new version storage/bdb/txn/txn_method.c: Import new version storage/bdb/txn/txn_rec.c: Import new version storage/bdb/txn/txn_recover.c: Import new version storage/bdb/txn/txn_region.c: Import new version storage/bdb/txn/txn_stat.c: Import new version storage/bdb/txn/txn_util.c: Import new version storage/bdb/xa/xa.c: Import new version storage/bdb/xa/xa_db.c: Import new version storage/bdb/xa/xa_map.c: Import new version
Diffstat (limited to 'storage/bdb/test')
-rw-r--r-- storage/bdb/test/archive.tcl 256
-rw-r--r-- storage/bdb/test/bigfile001.tcl 85
-rw-r--r-- storage/bdb/test/bigfile002.tcl 45
-rw-r--r-- storage/bdb/test/byteorder.tcl 34
-rw-r--r-- storage/bdb/test/conscript.tcl 124
-rw-r--r-- storage/bdb/test/dbm.tcl 128
-rw-r--r-- storage/bdb/test/dbscript.tcl 357
-rw-r--r-- storage/bdb/test/ddoyscript.tcl 172
-rw-r--r-- storage/bdb/test/ddscript.tcl 44
-rw-r--r-- storage/bdb/test/dead001.tcl 88
-rw-r--r-- storage/bdb/test/dead002.tcl 82
-rw-r--r-- storage/bdb/test/dead003.tcl 99
-rw-r--r-- storage/bdb/test/dead004.tcl 108
-rw-r--r-- storage/bdb/test/dead005.tcl 89
-rw-r--r-- storage/bdb/test/dead006.tcl 16
-rw-r--r-- storage/bdb/test/dead007.tcl 36
-rw-r--r-- storage/bdb/test/env001.tcl 154
-rw-r--r-- storage/bdb/test/env002.tcl 156
-rw-r--r-- storage/bdb/test/env003.tcl 149
-rw-r--r-- storage/bdb/test/env004.tcl 95
-rw-r--r-- storage/bdb/test/env005.tcl 52
-rw-r--r-- storage/bdb/test/env006.tcl 91
-rw-r--r-- storage/bdb/test/env007.tcl 605
-rw-r--r-- storage/bdb/test/env008.tcl 73
-rw-r--r-- storage/bdb/test/env009.tcl 82
-rw-r--r-- storage/bdb/test/env010.tcl 50
-rw-r--r-- storage/bdb/test/env011.tcl 39
-rw-r--r-- storage/bdb/test/hsearch.tcl 51
-rw-r--r-- storage/bdb/test/join.tcl 455
-rw-r--r-- storage/bdb/test/lock001.tcl 122
-rw-r--r-- storage/bdb/test/lock002.tcl 157
-rw-r--r-- storage/bdb/test/lock003.tcl 101
-rw-r--r-- storage/bdb/test/lock004.tcl 29
-rw-r--r-- storage/bdb/test/lock005.tcl 177
-rw-r--r-- storage/bdb/test/lockscript.tcl 117
-rw-r--r-- storage/bdb/test/log001.tcl 144
-rw-r--r-- storage/bdb/test/log002.tcl 102
-rw-r--r-- storage/bdb/test/log003.tcl 144
-rw-r--r-- storage/bdb/test/log004.tcl 52
-rw-r--r-- storage/bdb/test/log005.tcl 118
-rw-r--r-- storage/bdb/test/logtrack.tcl 139
-rw-r--r-- storage/bdb/test/mdbscript.tcl 400
-rw-r--r-- storage/bdb/test/memp001.tcl 199
-rw-r--r-- storage/bdb/test/memp002.tcl 62
-rw-r--r-- storage/bdb/test/memp003.tcl 154
-rw-r--r-- storage/bdb/test/mpoolscript.tcl 171
-rw-r--r-- storage/bdb/test/mutex001.tcl 51
-rw-r--r-- storage/bdb/test/mutex002.tcl 94
-rw-r--r-- storage/bdb/test/mutex003.tcl 52
-rw-r--r-- storage/bdb/test/mutexscript.tcl 91
-rw-r--r-- storage/bdb/test/ndbm.tcl 144
-rw-r--r-- storage/bdb/test/parallel.tcl 383
-rw-r--r-- storage/bdb/test/recd001.tcl 242
-rw-r--r-- storage/bdb/test/recd002.tcl 103
-rw-r--r-- storage/bdb/test/recd003.tcl 119
-rw-r--r-- storage/bdb/test/recd004.tcl 95
-rw-r--r-- storage/bdb/test/recd005.tcl 230
-rw-r--r-- storage/bdb/test/recd006.tcl 262
-rw-r--r-- storage/bdb/test/recd007.tcl 906
-rw-r--r-- storage/bdb/test/recd008.tcl 226
-rw-r--r-- storage/bdb/test/recd009.tcl 180
-rw-r--r-- storage/bdb/test/recd010.tcl 257
-rw-r--r-- storage/bdb/test/recd011.tcl 136
-rw-r--r-- storage/bdb/test/recd012.tcl 433
-rw-r--r-- storage/bdb/test/recd013.tcl 285
-rw-r--r-- storage/bdb/test/recd014.tcl 445
-rw-r--r-- storage/bdb/test/recd015.tcl 162
-rw-r--r-- storage/bdb/test/recd016.tcl 179
-rw-r--r-- storage/bdb/test/recd017.tcl 158
-rw-r--r-- storage/bdb/test/recd018.tcl 110
-rw-r--r-- storage/bdb/test/recd019.tcl 123
-rw-r--r-- storage/bdb/test/recd020.tcl 80
-rw-r--r-- storage/bdb/test/recd15scr.tcl 74
-rw-r--r-- storage/bdb/test/recdscript.tcl 37
-rw-r--r-- storage/bdb/test/rep001.tcl 198
-rw-r--r-- storage/bdb/test/rep002.tcl 323
-rw-r--r-- storage/bdb/test/rep003.tcl 277
-rw-r--r-- storage/bdb/test/rep004.tcl 198
-rw-r--r-- storage/bdb/test/rep005.tcl 307
-rw-r--r-- storage/bdb/test/reputils.tcl 1275
-rw-r--r-- storage/bdb/test/rpc001.tcl 476
-rw-r--r-- storage/bdb/test/rpc002.tcl 161
-rw-r--r-- storage/bdb/test/rpc003.tcl 184
-rw-r--r-- storage/bdb/test/rpc004.tcl 87
-rw-r--r-- storage/bdb/test/rpc005.tcl 158
-rw-r--r-- storage/bdb/test/rsrc001.tcl 221
-rw-r--r-- storage/bdb/test/rsrc002.tcl 66
-rw-r--r-- storage/bdb/test/rsrc003.tcl 173
-rw-r--r-- storage/bdb/test/rsrc004.tcl 52
-rw-r--r-- storage/bdb/test/scr001/chk.code 38
-rw-r--r-- storage/bdb/test/scr002/chk.def 64
-rw-r--r-- storage/bdb/test/scr003/chk.define 86
-rw-r--r-- storage/bdb/test/scr004/chk.javafiles 30
-rw-r--r-- storage/bdb/test/scr005/chk.nl 114
-rw-r--r-- storage/bdb/test/scr006/chk.offt 52
-rw-r--r-- storage/bdb/test/scr007/chk.proto 44
-rw-r--r-- storage/bdb/test/scr008/chk.pubdef 185
-rw-r--r-- storage/bdb/test/scr009/chk.srcfiles 47
-rw-r--r-- storage/bdb/test/scr010/chk.str 42
-rw-r--r-- storage/bdb/test/scr010/spell.ok 3697
-rw-r--r-- storage/bdb/test/scr011/chk.tags 46
-rw-r--r-- storage/bdb/test/scr012/chk.vx_code 61
-rw-r--r-- storage/bdb/test/scr013/chk.stats 125
-rw-r--r-- storage/bdb/test/scr014/chk.err 34
-rw-r--r-- storage/bdb/test/scr015/README 36
-rw-r--r-- storage/bdb/test/scr015/TestConstruct01.cpp 323
-rw-r--r-- storage/bdb/test/scr015/TestConstruct01.testerr 0
-rw-r--r-- storage/bdb/test/scr015/TestConstruct01.testout 27
-rw-r--r-- storage/bdb/test/scr015/TestExceptInclude.cpp 27
-rw-r--r-- storage/bdb/test/scr015/TestGetSetMethods.cpp 91
-rw-r--r-- storage/bdb/test/scr015/TestKeyRange.cpp 172
-rw-r--r-- storage/bdb/test/scr015/TestKeyRange.testin 8
-rw-r--r-- storage/bdb/test/scr015/TestKeyRange.testout 19
-rw-r--r-- storage/bdb/test/scr015/TestLogc.cpp 101
-rw-r--r-- storage/bdb/test/scr015/TestLogc.testout 1
-rw-r--r-- storage/bdb/test/scr015/TestSimpleAccess.cpp 67
-rw-r--r-- storage/bdb/test/scr015/TestSimpleAccess.testout 3
-rw-r--r-- storage/bdb/test/scr015/TestTruncate.cpp 84
-rw-r--r-- storage/bdb/test/scr015/TestTruncate.testout 6
-rw-r--r-- storage/bdb/test/scr015/chk.cxxtests 73
-rw-r--r-- storage/bdb/test/scr015/ignore 4
-rw-r--r-- storage/bdb/test/scr015/testall 32
-rw-r--r-- storage/bdb/test/scr015/testone 122
-rw-r--r-- storage/bdb/test/scr016/CallbackTest.java 83
-rw-r--r-- storage/bdb/test/scr016/CallbackTest.testout 54
-rw-r--r-- storage/bdb/test/scr016/README 37
-rw-r--r-- storage/bdb/test/scr016/TestAppendRecno.java 258
-rw-r--r-- storage/bdb/test/scr016/TestAppendRecno.testout 82
-rw-r--r-- storage/bdb/test/scr016/TestAssociate.java 333
-rw-r--r-- storage/bdb/test/scr016/TestAssociate.testout 30
-rw-r--r-- storage/bdb/test/scr016/TestClosedDb.java 62
-rw-r--r-- storage/bdb/test/scr016/TestClosedDb.testout 2
-rw-r--r-- storage/bdb/test/scr016/TestConstruct01.java 477
-rw-r--r-- storage/bdb/test/scr016/TestConstruct01.testerr 0
-rw-r--r-- storage/bdb/test/scr016/TestConstruct01.testout 3
-rw-r--r-- storage/bdb/test/scr016/TestConstruct02.java 326
-rw-r--r-- storage/bdb/test/scr016/TestConstruct02.testout 3
-rw-r--r-- storage/bdb/test/scr016/TestDbtFlags.java 241
-rw-r--r-- storage/bdb/test/scr016/TestDbtFlags.testerr 54
-rw-r--r-- storage/bdb/test/scr016/TestDbtFlags.testout 78
-rw-r--r-- storage/bdb/test/scr016/TestGetSetMethods.java 99
-rw-r--r-- storage/bdb/test/scr016/TestKeyRange.java 205
-rw-r--r-- storage/bdb/test/scr016/TestKeyRange.testout 27
-rw-r--r-- storage/bdb/test/scr016/TestLockVec.java 249
-rw-r--r-- storage/bdb/test/scr016/TestLockVec.testout 8
-rw-r--r-- storage/bdb/test/scr016/TestLogc.java 100
-rw-r--r-- storage/bdb/test/scr016/TestLogc.testout 1
-rw-r--r-- storage/bdb/test/scr016/TestOpenEmpty.java 199
-rw-r--r-- storage/bdb/test/scr016/TestOpenEmpty.testerr 2
-rw-r--r-- storage/bdb/test/scr016/TestReplication.java 289
-rw-r--r-- storage/bdb/test/scr016/TestRpcServer.java 193
-rw-r--r-- storage/bdb/test/scr016/TestSameDbt.java 56
-rw-r--r-- storage/bdb/test/scr016/TestSameDbt.testout 2
-rw-r--r-- storage/bdb/test/scr016/TestSimpleAccess.java 37
-rw-r--r-- storage/bdb/test/scr016/TestSimpleAccess.testout 3
-rw-r--r-- storage/bdb/test/scr016/TestStat.java 94
-rw-r--r-- storage/bdb/test/scr016/TestStat.testout 18
-rw-r--r-- storage/bdb/test/scr016/TestTruncate.java 87
-rw-r--r-- storage/bdb/test/scr016/TestTruncate.testout 6
-rw-r--r-- storage/bdb/test/scr016/TestUtil.java 57
-rw-r--r-- storage/bdb/test/scr016/TestXAServlet.java 313
-rw-r--r-- storage/bdb/test/scr016/chk.javatests 79
-rw-r--r-- storage/bdb/test/scr016/ignore 25
-rw-r--r-- storage/bdb/test/scr016/testall 32
-rw-r--r-- storage/bdb/test/scr016/testone 126
-rw-r--r-- storage/bdb/test/scr017/O.BH 196
-rw-r--r-- storage/bdb/test/scr017/O.R 196
-rw-r--r-- storage/bdb/test/scr017/chk.db185 29
-rw-r--r-- storage/bdb/test/scr017/t.c 188
-rw-r--r-- storage/bdb/test/scr018/chk.comma 30
-rw-r--r-- storage/bdb/test/scr018/t.c 46
-rw-r--r-- storage/bdb/test/scr019/chk.include 44
-rw-r--r-- storage/bdb/test/scr020/chk.inc 44
-rw-r--r-- storage/bdb/test/scr021/chk.flags 118
-rw-r--r-- storage/bdb/test/scr022/chk.rr 27
-rw-r--r-- storage/bdb/test/sdb001.tcl 147
-rw-r--r-- storage/bdb/test/sdb002.tcl 228
-rw-r--r-- storage/bdb/test/sdb003.tcl 182
-rw-r--r-- storage/bdb/test/sdb004.tcl 241
-rw-r--r-- storage/bdb/test/sdb005.tcl 146
-rw-r--r-- storage/bdb/test/sdb006.tcl 169
-rw-r--r-- storage/bdb/test/sdb007.tcl 141
-rw-r--r-- storage/bdb/test/sdb008.tcl 94
-rw-r--r-- storage/bdb/test/sdb009.tcl 108
-rw-r--r-- storage/bdb/test/sdb010.tcl 170
-rw-r--r-- storage/bdb/test/sdb011.tcl 141
-rw-r--r-- storage/bdb/test/sdb012.tcl 428
-rw-r--r-- storage/bdb/test/sdbscript.tcl 47
-rw-r--r-- storage/bdb/test/sdbtest001.tcl 150
-rw-r--r-- storage/bdb/test/sdbtest002.tcl 173
-rw-r--r-- storage/bdb/test/sdbutils.tcl 197
-rw-r--r-- storage/bdb/test/sec001.tcl 223
-rw-r--r-- storage/bdb/test/sec002.tcl 181
-rw-r--r-- storage/bdb/test/shelltest.tcl 101
-rw-r--r-- storage/bdb/test/si001.tcl 178
-rw-r--r-- storage/bdb/test/si002.tcl 175
-rw-r--r-- storage/bdb/test/si003.tcl 148
-rw-r--r-- storage/bdb/test/si004.tcl 200
-rw-r--r-- storage/bdb/test/si005.tcl 135
-rw-r--r-- storage/bdb/test/si006.tcl 129
-rw-r--r-- storage/bdb/test/sindex.tcl 259
-rw-r--r-- storage/bdb/test/sysscript.tcl 282
-rw-r--r-- storage/bdb/test/test.tcl 1941
-rw-r--r-- storage/bdb/test/test001.tcl 222
-rw-r--r-- storage/bdb/test/test002.tcl 161
-rw-r--r-- storage/bdb/test/test003.tcl 205
-rw-r--r-- storage/bdb/test/test004.tcl 169
-rw-r--r-- storage/bdb/test/test005.tcl 19
-rw-r--r-- storage/bdb/test/test006.tcl 195
-rw-r--r-- storage/bdb/test/test007.tcl 19
-rw-r--r-- storage/bdb/test/test008.tcl 200
-rw-r--r-- storage/bdb/test/test009.tcl 18
-rw-r--r-- storage/bdb/test/test010.tcl 176
-rw-r--r-- storage/bdb/test/test011.tcl 470
-rw-r--r-- storage/bdb/test/test012.tcl 139
-rw-r--r-- storage/bdb/test/test013.tcl 240
-rw-r--r-- storage/bdb/test/test014.tcl 253
-rw-r--r-- storage/bdb/test/test015.tcl 276
-rw-r--r-- storage/bdb/test/test016.tcl 207
-rw-r--r-- storage/bdb/test/test017.tcl 316
-rw-r--r-- storage/bdb/test/test018.tcl 21
-rw-r--r-- storage/bdb/test/test019.tcl 136
-rw-r--r-- storage/bdb/test/test020.tcl 137
-rw-r--r-- storage/bdb/test/test021.tcl 162
-rw-r--r-- storage/bdb/test/test022.tcl 62
-rw-r--r-- storage/bdb/test/test023.tcl 219
-rw-r--r-- storage/bdb/test/test024.tcl 268
-rw-r--r-- storage/bdb/test/test025.tcl 146
-rw-r--r-- storage/bdb/test/test026.tcl 155
-rw-r--r-- storage/bdb/test/test027.tcl 17
-rw-r--r-- storage/bdb/test/test028.tcl 222
-rw-r--r-- storage/bdb/test/test029.tcl 247
-rw-r--r-- storage/bdb/test/test030.tcl 231
-rw-r--r-- storage/bdb/test/test031.tcl 230
-rw-r--r-- storage/bdb/test/test032.tcl 231
-rw-r--r-- storage/bdb/test/test033.tcl 176
-rw-r--r-- storage/bdb/test/test034.tcl 23
-rw-r--r-- storage/bdb/test/test035.tcl 22
-rw-r--r-- storage/bdb/test/test036.tcl 173
-rw-r--r-- storage/bdb/test/test037.tcl 196
-rw-r--r-- storage/bdb/test/test038.tcl 227
-rw-r--r-- storage/bdb/test/test039.tcl 211
-rw-r--r-- storage/bdb/test/test040.tcl 23
-rw-r--r-- storage/bdb/test/test041.tcl 18
-rw-r--r-- storage/bdb/test/test042.tcl 184
-rw-r--r-- storage/bdb/test/test043.tcl 192
-rw-r--r-- storage/bdb/test/test044.tcl 252
-rw-r--r-- storage/bdb/test/test045.tcl 126
-rw-r--r-- storage/bdb/test/test046.tcl 813
-rw-r--r-- storage/bdb/test/test047.tcl 258
-rw-r--r-- storage/bdb/test/test048.tcl 171
-rw-r--r-- storage/bdb/test/test049.tcl 184
-rw-r--r-- storage/bdb/test/test050.tcl 221
-rw-r--r-- storage/bdb/test/test051.tcl 222
-rw-r--r-- storage/bdb/test/test052.tcl 269
-rw-r--r-- storage/bdb/test/test053.tcl 231
-rw-r--r-- storage/bdb/test/test054.tcl 460
-rw-r--r-- storage/bdb/test/test055.tcl 141
-rw-r--r-- storage/bdb/test/test056.tcl 169
-rw-r--r-- storage/bdb/test/test057.tcl 201
-rw-r--r-- storage/bdb/test/test058.tcl 103
-rw-r--r-- storage/bdb/test/test059.tcl 150
-rw-r--r-- storage/bdb/test/test060.tcl 60
-rw-r--r-- storage/bdb/test/test061.tcl 226
-rw-r--r-- storage/bdb/test/test062.tcl 153
-rw-r--r-- storage/bdb/test/test063.tcl 174
-rw-r--r-- storage/bdb/test/test064.tcl 69
-rw-r--r-- storage/bdb/test/test065.tcl 208
-rw-r--r-- storage/bdb/test/test066.tcl 99
-rw-r--r-- storage/bdb/test/test067.tcl 162
-rw-r--r-- storage/bdb/test/test068.tcl 231
-rw-r--r-- storage/bdb/test/test069.tcl 14
-rw-r--r-- storage/bdb/test/test070.tcl 136
-rw-r--r-- storage/bdb/test/test071.tcl 16
-rw-r--r-- storage/bdb/test/test072.tcl 257
-rw-r--r-- storage/bdb/test/test073.tcl 290
-rw-r--r-- storage/bdb/test/test074.tcl 275
-rw-r--r-- storage/bdb/test/test075.tcl 205
-rw-r--r-- storage/bdb/test/test076.tcl 84
-rw-r--r-- storage/bdb/test/test077.tcl 93
-rw-r--r-- storage/bdb/test/test078.tcl 245
-rw-r--r-- storage/bdb/test/test079.tcl 29
-rw-r--r-- storage/bdb/test/test080.tcl 126
-rw-r--r-- storage/bdb/test/test081.tcl 15
-rw-r--r-- storage/bdb/test/test082.tcl 14
-rw-r--r-- storage/bdb/test/test083.tcl 166
-rw-r--r-- storage/bdb/test/test084.tcl 53
-rw-r--r-- storage/bdb/test/test085.tcl 331
-rw-r--r-- storage/bdb/test/test086.tcl 166
-rw-r--r-- storage/bdb/test/test087.tcl 290
-rw-r--r-- storage/bdb/test/test088.tcl 174
-rw-r--r-- storage/bdb/test/test089.tcl 264
-rw-r--r-- storage/bdb/test/test090.tcl 16
-rw-r--r-- storage/bdb/test/test091.tcl 20
-rw-r--r-- storage/bdb/test/test092.tcl 247
-rw-r--r-- storage/bdb/test/test093.tcl 393
-rw-r--r-- storage/bdb/test/test094.tcl 189
-rw-r--r-- storage/bdb/test/test095.tcl 367
-rw-r--r-- storage/bdb/test/test096.tcl 371
-rw-r--r-- storage/bdb/test/test097.tcl 188
-rw-r--r-- storage/bdb/test/test098.tcl 91
-rw-r--r-- storage/bdb/test/test099.tcl 276
-rw-r--r-- storage/bdb/test/test100.tcl 17
-rw-r--r-- storage/bdb/test/test101.tcl 17
-rw-r--r-- storage/bdb/test/testparams.tcl 336
-rw-r--r-- storage/bdb/test/testutils.tcl 3458
-rw-r--r-- storage/bdb/test/txn001.tcl 116
-rw-r--r-- storage/bdb/test/txn002.tcl 91
-rw-r--r-- storage/bdb/test/txn003.tcl 238
-rw-r--r-- storage/bdb/test/txn004.tcl 62
-rw-r--r-- storage/bdb/test/txn005.tcl 75
-rw-r--r-- storage/bdb/test/txn006.tcl 47
-rw-r--r-- storage/bdb/test/txn007.tcl 57
-rw-r--r-- storage/bdb/test/txn008.tcl 32
-rw-r--r-- storage/bdb/test/txn009.tcl 32
-rw-r--r-- storage/bdb/test/txnscript.tcl 67
-rw-r--r-- storage/bdb/test/update.tcl 93
-rw-r--r-- storage/bdb/test/upgrade.tcl 745
-rw-r--r-- storage/bdb/test/wordlist 10001
-rw-r--r-- storage/bdb/test/wrap.tcl 71
320 files changed, 0 insertions, 67462 deletions
diff --git a/storage/bdb/test/archive.tcl b/storage/bdb/test/archive.tcl
deleted file mode 100644
index fa68f633b0f..00000000000
--- a/storage/bdb/test/archive.tcl
+++ /dev/null
@@ -1,256 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: archive.tcl,v 11.26 2004/09/22 18:01:04 bostic Exp $
-#
-# Options are:
-# -checkrec <checkpoint frequency>
-# -dir <dbhome directory>
-# -maxfilesize <maxsize of log file>
-proc archive { { inmem 0 } args } {
- global alphabet
- source ./include.tcl
-
- # Set defaults
- if { $inmem == 1 } {
- set maxbsize [expr 8 * [expr 1024 * 1024]]
- set desc "in-memory"
- } else {
- set maxbsize [expr 8 * 1024]
- set desc "on-disk"
- }
- set maxfile [expr 32 * 1024]
- set checkrec 500
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -c.* { incr i; set checkrec [lindex $args $i] }
- -d.* { incr i; set testdir [lindex $args $i] }
- -m.* { incr i; set maxfile [lindex $args $i] }
- default {
- puts "FAIL:[timestamp] archive usage"
- puts "usage: archive -checkrec <checkpt freq> \
- -dir <directory> -maxfilesize <max size of log files>"
- return
- }
- }
- }
-
- # Clean out old log if it existed
- puts "Archive: Log archive test (using $desc logging)."
- puts "Unlinking log: error message OK"
- env_cleanup $testdir
-
- # Now run the various functionality tests
- if { $inmem == 0 } {
- set eflags "-create -txn -home $testdir \
- -log_buffer $maxbsize -log_max $maxfile"
- } else {
- set eflags "-create -txn -home $testdir -log_inmemory \
- -log_buffer $maxbsize -log_max $maxfile"
- }
- set dbenv [eval {berkdb_env} $eflags]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- set logc [$dbenv log_cursor]
- error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
-
- # The basic test structure here is that we write a lot of log
-# records (enough to fill up 100 log files; each log file is
- # small). We start with three txns and open a database in
- # each transaction. Then, in a loop, we take periodic
- # checkpoints. Between each pair of checkpoints, we end one
- # transaction; when no transactions are left, we start up three
- # new ones, letting them overlap checkpoints as well.
- #
- # The pattern that we create is:
- # 1. Create TXN1, TXN2, TXN3 and open dbs within the txns.
- # 2. Write a bunch of additional log records.
- # 3. Checkpoint.
- # 4. Archive, checking that we list the right files.
- # 5. Commit one transaction.
- # 6. If no txns left, start 3 new ones.
- # 7. Until we've gone through enough records, return to step 2.
-
- set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
- puts "\tArchive.a: Writing log records; checkpoint every $checkrec records"
- set nrecs $maxfile
- set rec 0:$baserec
-
- # Begin 1st transaction and record current log file. Open
- # a database in the transaction; the log file won't be
- # removable until the transaction is aborted or committed.
- set t1 [$dbenv txn]
- error_check_good t1:txn_begin [is_valid_txn $t1 $dbenv] TRUE
-
- set l1 [lindex [lindex [$logc get -last] 0] 0]
- set lsnlist [list $l1]
-
- set tdb1 [eval {berkdb_open -create -mode 0644} \
- -env $dbenv -txn $t1 -btree tdb1.db]
- error_check_good dbopen [is_valid_db $tdb1] TRUE
-
- # Do the same for a 2nd and 3rd transaction.
- set t2 [$dbenv txn]
- error_check_good t2:txn_begin [is_valid_txn $t2 $dbenv] TRUE
- set l2 [lindex [lindex [$logc get -last] 0] 0]
- lappend lsnlist $l2
- set tdb2 [eval {berkdb_open -create -mode 0644} \
- -env $dbenv -txn $t2 -btree tdb2.db]
- error_check_good dbopen [is_valid_db $tdb2] TRUE
-
- set t3 [$dbenv txn]
- error_check_good t3:txn_begin [is_valid_txn $t3 $dbenv] TRUE
- set l3 [lindex [lindex [$logc get -last] 0] 0]
- lappend lsnlist $l3
- set tdb3 [eval {berkdb_open -create -mode 0644} \
- -env $dbenv -txn $t3 -btree tdb3.db]
- error_check_good dbopen [is_valid_db $tdb3] TRUE
-
- # Keep a list of active transactions and databases opened
- # within those transactions.
- set txnlist [list "$t1 $tdb1" "$t2 $tdb2" "$t3 $tdb3"]
-
- # Loop through a large number of log records, checkpointing
- # and checking db_archive periodically.
- for { set i 1 } { $i <= $nrecs } { incr i } {
- set rec $i:$baserec
- set lsn [$dbenv log_put $rec]
- error_check_bad log_put [llength $lsn] 0
- if { [expr $i % $checkrec] == 0 } {
-
- # Take a checkpoint
- $dbenv txn_checkpoint
- set ckp_file [lindex [lindex [$logc get -last] 0] 0]
- catch { archive_command -h $testdir -a } res_log_full
- if { [string first db_archive $res_log_full] == 0 } {
- set res_log_full ""
- }
- catch { archive_command -h $testdir } res_log
- if { [string first db_archive $res_log] == 0 } {
- set res_log ""
- }
- catch { archive_command -h $testdir -l } res_alllog
- catch { archive_command -h $testdir -a -s } \
- res_data_full
- catch { archive_command -h $testdir -s } res_data
-
- if { $inmem == 0 } {
- error_check_good nlogfiles [llength $res_alllog] \
- [lindex [lindex [$logc get -last] 0] 0]
- } else {
- error_check_good nlogfiles [llength $res_alllog] 0
- }
-
- error_check_good logs_match [llength $res_log_full] \
- [llength $res_log]
- error_check_good data_match [llength $res_data_full] \
- [llength $res_data]
-
- # Check right number of log files
- if { $inmem == 0 } {
- set expected [min $ckp_file [expr [lindex $lsnlist 0] - 1]]
- error_check_good nlogs [llength $res_log] $expected
- }
-
- # Check that the relative names are a subset of the
- # full names
- set n 0
- foreach x $res_log {
- error_check_bad log_name_match:$res_log \
- [string first $x \
- [lindex $res_log_full $n]] -1
- incr n
- }
-
- set n 0
- foreach x $res_data {
- error_check_bad log_name_match:$res_data \
- [string first $x \
- [lindex $res_data_full $n]] -1
- incr n
- }
-
- # Commit a transaction and close the associated db.
- set t [lindex [lindex $txnlist 0] 0]
- set tdb [lindex [lindex $txnlist 0] 1]
- if { [string length $t] != 0 } {
- error_check_good txn_commit:$t [$t commit] 0
- error_check_good tdb_close:$tdb [$tdb close] 0
- set txnlist [lrange $txnlist 1 end]
- set lsnlist [lrange $lsnlist 1 end]
- }
-
- # If we're down to no transactions, start some new ones.
- if { [llength $txnlist] == 0 } {
- set t1 [$dbenv txn]
- error_check_bad tx_begin $t1 NULL
- error_check_good \
- tx_begin [is_substr $t1 $dbenv] 1
- set tdb1 [eval {berkdb_open -create -mode 0644} \
- -env $dbenv -txn $t1 -btree tdb1.db]
- error_check_good dbopen [is_valid_db $tdb1] TRUE
- set l1 [lindex [lindex [$logc get -last] 0] 0]
- lappend lsnlist $l1
-
- set t2 [$dbenv txn]
- error_check_bad tx_begin $t2 NULL
- error_check_good \
- tx_begin [is_substr $t2 $dbenv] 1
- set tdb2 [eval {berkdb_open -create -mode 0644} \
- -env $dbenv -txn $t2 -btree tdb2.db]
- error_check_good dbopen [is_valid_db $tdb2] TRUE
- set l2 [lindex [lindex [$logc get -last] 0] 0]
- lappend lsnlist $l2
-
- set t3 [$dbenv txn]
- error_check_bad tx_begin $t3 NULL
- error_check_good \
- tx_begin [is_substr $t3 $dbenv] 1
- set tdb3 [eval {berkdb_open -create -mode 0644} \
- -env $dbenv -txn $t3 -btree tdb3.db]
- error_check_good dbopen [is_valid_db $tdb3] TRUE
- set l3 [lindex [lindex [$logc get -last] 0] 0]
- lappend lsnlist $l3
-
- set txnlist [list "$t1 $tdb1" "$t2 $tdb2" "$t3 $tdb3"]
- }
- }
- }
- # Commit any transactions still running.
- puts "\tArchive.b: Commit any transactions still running."
- foreach pair $txnlist {
- set t [lindex $pair 0]
- set tdb [lindex $pair 1]
- error_check_good txn_commit:$t [$t commit] 0
- error_check_good tdb_close:$tdb [$tdb close] 0
- }
-
- # Close and unlink the file
- error_check_good log_cursor_close [$logc close] 0
- reset_env $dbenv
-}
-
-proc archive_command { args } {
- source ./include.tcl
-
- # Catch a list of files output by db_archive.
- catch { eval exec $util_path/db_archive $args } output
-
- if { $is_windows_test == 1 || 1 } {
- # Convert all filenames to use forward slashes. The "|| 1"
- # makes this normalization unconditional, not Windows-only.
- regsub -all {[\\]} $output / output
- }
-
- # Output the [possibly-transformed] list.
- return $output
-}
-
-proc min { a b } {
- if {$a < $b} {
- return $a
- } else {
- return $b
- }
-}
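For context on what archive_command wraps: db_archive is the standalone utility that reports which log and database files an environment can safely archive or remove. A minimal sketch of the four invocations the test compares, run outside the harness (this assumes an existing transactional environment under ./TESTDIR and db_archive on the path; the directory name is illustrative, not part of the original test):

    # No flag: log files no longer in use (relative names).
    catch { exec db_archive -h ./TESTDIR } res_log
    # -a: the same list, but with absolute pathnames.
    catch { exec db_archive -h ./TESTDIR -a } res_log_full
    # -l: every log file, whether still in use or not.
    catch { exec db_archive -h ./TESTDIR -l } res_alllog
    # -s: database files that must be archived for recovery.
    catch { exec db_archive -h ./TESTDIR -s } res_data

The test's consistency checks reduce to comparing the lengths and name prefixes of exactly these four lists.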
diff --git a/storage/bdb/test/bigfile001.tcl b/storage/bdb/test/bigfile001.tcl
deleted file mode 100644
index 39981413829..00000000000
--- a/storage/bdb/test/bigfile001.tcl
+++ /dev/null
@@ -1,85 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: bigfile001.tcl,v 11.9 2004/01/28 03:36:26 bostic Exp $
-#
-# TEST bigfile001
-# TEST Create a database greater than 4 GB in size. Close, verify.
-# TEST Grow the database somewhat. Close, reverify. Lather, rinse,
-# TEST repeat. Since it will not work on all systems, this test is
-# TEST not run by default.
-proc bigfile001 { method \
- { itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data"
-
- env_cleanup $testdir
-
- # Create the database. Use 64K pages; we want a good fill
- # factor, and page size doesn't matter much. Use a 50MB
- # cache; that should be manageable, and will help
- # performance.
- set dbname $testdir/big.db
-
- set db [eval {berkdb_open -create} {-pagesize 65536 \
- -cachesize {0 50000000 0}} $omethod $args $dbname]
- error_check_good db_open [is_valid_db $db] TRUE
-
- puts -nonewline "\tBigfile.a: Creating database...0%..."
- flush stdout
-
- set data [string repeat z $itemsize]
-
- set more_than_ten_already 0
- for { set i 0 } { $i < $nitems } { incr i } {
- set key key[format %08u $i]
-
- error_check_good db_put($i) [$db put $key $data] 0
-
- if { $i % 5000 == 0 } {
- set pct [expr 100 * $i / $nitems]
- puts -nonewline "\b\b\b\b\b"
- if { $pct >= 10 } {
- if { $more_than_ten_already } {
- puts -nonewline "\b"
- } else {
- set more_than_ten_already 1
- }
- }
-
- puts -nonewline "$pct%..."
- flush stdout
- }
- }
- puts "\b\b\b\b\b\b100%..."
- error_check_good db_close [$db close] 0
-
- puts "\tBigfile.b: Verifying database..."
- error_check_good verify \
- [verify_dir $testdir "\t\t" 0 0 1 50000000] 0
-
- puts "\tBigfile.c: Grow database $growtms times by $growby items"
-
- for { set j 0 } { $j < $growtms } { incr j } {
- set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
- error_check_good db_open [is_valid_db $db] TRUE
- puts -nonewline "\t\tBigfile.c.1: Adding $growby items..."
- flush stdout
- for { set i 0 } { $i < $growby } { incr i } {
- set key key[format %08u $i].$j
- error_check_good db_put($j.$i) [$db put $key $data] 0
- }
- error_check_good db_close [$db close] 0
- puts "done."
-
- puts "\t\tBigfile.c.2: Verifying database..."
- error_check_good verify($j) \
- [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
- }
-}
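The default parameters are chosen to push the file just past the 32-bit size boundary, which is the point of the test. The arithmetic, as a plain Tcl one-liner (illustrative only):

    # 2^20 items of 4 KiB each is 2^32 bytes of data before any page overhead.
    puts [expr {wide(4096) * 1048576}]   ;# 4294967296 bytes = 4 GiB

So even before btree page overhead the database crosses 4 GB, exercising 64-bit file offsets.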
diff --git a/storage/bdb/test/bigfile002.tcl b/storage/bdb/test/bigfile002.tcl
deleted file mode 100644
index 6686f9ac627..00000000000
--- a/storage/bdb/test/bigfile002.tcl
+++ /dev/null
@@ -1,45 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: bigfile002.tcl,v 11.9 2004/01/28 03:36:26 bostic Exp $
-#
-# TEST bigfile002
-# TEST This one should be faster and not require so much disk space,
-# TEST although it doesn't test as extensively. Create an mpool file
-# TEST with 1K pages. Dirty page 6000000. Sync.
-proc bigfile002 { args } {
- source ./include.tcl
-
- puts -nonewline \
- "Bigfile002: Creating large, sparse file through mpool..."
- flush stdout
-
- env_cleanup $testdir
-
- # Create env.
- set env [berkdb_env -create -home $testdir]
- error_check_good valid_env [is_valid_env $env] TRUE
-
- # Create the file.
- set name big002.file
- set file [$env mpool -create -pagesize 1024 $name]
-
- # Dirty page 6000000
- set pg [$file get -create 6000000]
- error_check_good pg_init [$pg init A] 0
- error_check_good pg_set [$pg is_setto A] 1
-
- # Put page back.
- error_check_good pg_put [$pg put -dirty] 0
-
- # Fsync.
- error_check_good fsync [$file fsync] 0
-
- puts "succeeded."
-
- # Close.
- error_check_good fclose [$file close] 0
- error_check_good env_close [$env close] 0
-}
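bigfile002 reaches the same >4 GB territory without writing gigabytes of data: dirtying a single high-numbered mpool page produces a sparse file whose logical end offset crosses the 32-bit line. The offset math (illustrative only):

    # Page 6000000 at 1 KiB per page ends near byte offset 6.1e9.
    puts [expr {wide(1024) * 6000000}]   ;# 6144000000, well past 2^32 = 4294967296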
diff --git a/storage/bdb/test/byteorder.tcl b/storage/bdb/test/byteorder.tcl
deleted file mode 100644
index d94f5a0146a..00000000000
--- a/storage/bdb/test/byteorder.tcl
+++ /dev/null
@@ -1,34 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: byteorder.tcl,v 11.16 2004/01/28 03:36:26 bostic Exp $
-#
-# Byte Order Test
-# Use existing tests and run with both byte orders.
-proc byteorder { method {nentries 1000} } {
- source ./include.tcl
- puts "Byteorder: $method $nentries"
-
- eval {test001 $method $nentries 0 0 "001" -lorder 1234}
- eval {verify_dir $testdir}
- eval {test001 $method $nentries 0 0 "001" -lorder 4321}
- eval {verify_dir $testdir}
- eval {test003 $method -lorder 1234}
- eval {verify_dir $testdir}
- eval {test003 $method -lorder 4321}
- eval {verify_dir $testdir}
- eval {test010 $method $nentries 5 "010" -lorder 1234}
- eval {verify_dir $testdir}
- eval {test010 $method $nentries 5 "010" -lorder 4321}
- eval {verify_dir $testdir}
- eval {test011 $method $nentries 5 "011" -lorder 1234}
- eval {verify_dir $testdir}
- eval {test011 $method $nentries 5 "011" -lorder 4321}
- eval {verify_dir $testdir}
- eval {test018 $method $nentries -lorder 1234}
- eval {verify_dir $testdir}
- eval {test018 $method $nentries -lorder 4321}
- eval {verify_dir $testdir}
-}
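Each pair of runs above differs only in -lorder: 1234 selects little-endian and 4321 big-endian as the database's stored byte order, independent of the host CPU. A minimal sketch of the flag on its own, run in the harness tclsh with the BDB Tcl API loaded (the file names are illustrative):

    # Create one database in each byte order; either can then be read on
    # any host, with byte swapping applied as pages are read.
    set db_le [berkdb_open -create -btree -lorder 1234 le.db]
    set db_be [berkdb_open -create -btree -lorder 4321 be.db]
    $db_le put key data
    $db_be put key data
    $db_le close
    $db_be close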
diff --git a/storage/bdb/test/conscript.tcl b/storage/bdb/test/conscript.tcl
deleted file mode 100644
index 8740ae21425..00000000000
--- a/storage/bdb/test/conscript.tcl
+++ /dev/null
@@ -1,124 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: conscript.tcl,v 11.25 2004/01/28 03:36:26 bostic Exp $
-#
-# Script for DB_CONSUME test (test070.tcl).
-# Usage: conscript dir file runtype nitems outputfile tnum args
-# dir: DBHOME directory
-# file: db file on which to operate
-# runtype: PRODUCE, CONSUME or WAIT -- which am I?
-# nitems: number of items to put or get
-# outputfile: where to log consumer results
-# tnum: test number
-
-proc consumescript_produce { db_cmd nitems tnum args } {
- source ./include.tcl
- global mydata
-
- set pid [pid]
- puts "\tTest$tnum: Producer $pid starting, producing $nitems items."
-
- set db [eval $db_cmd]
- error_check_good db_open:$pid [is_valid_db $db] TRUE
-
- set oret -1
- set ret 0
- for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
- set oret $ret
- if { 0xffffffff > 0 && $oret > 0x7fffffff } {
- incr oret [expr 0 - 0x100000000]
- }
- set ret [$db put -append [chop_data q $mydata]]
- error_check_good db_put \
- [expr $oret > $ret ? \
- ($oret > 0x7fffffff && $ret < 0x7fffffff) : 1] 1
-
- }
-
- set ret [catch {$db close} res]
- error_check_good db_close:$pid $ret 0
- puts "\t\tTest$tnum: Producer $pid finished."
-}
-
-proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
- source ./include.tcl
- global mydata
- set pid [pid]
- puts "\tTest$tnum: Consumer $pid starting, seeking $nitems items."
-
- set db [eval $db_cmd]
- error_check_good db_open:$pid [is_valid_db $db] TRUE
-
- set oid [open $outputfile a]
-
- for { set ndx 0 } { $ndx < $nitems } { } {
- set ret [$db get $mode]
- if { [llength $ret] > 0 } {
- error_check_good correct_data:$pid \
- [lindex [lindex $ret 0] 1] [pad_data q $mydata]
- set rno [lindex [lindex $ret 0] 0]
- puts $oid $rno
- incr ndx
- } else {
- # No data to consume; wait.
- }
- }
-
- error_check_good output_close:$pid [close $oid] ""
-
- set ret [catch {$db close} res]
- error_check_good db_close:$pid $ret 0
- puts "\t\tTest$tnum: Consumer $pid finished."
-}
-
-source ./include.tcl
-source $test_path/test.tcl
-
-set usage "conscript.tcl dir file runtype nitems outputfile tnum args"
-
-# Verify usage
-if { $argc < 6 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set dir [lindex $argv 0]
-set file [lindex $argv 1]
-set runtype [lindex $argv 2]
-set nitems [lindex $argv 3]
-set outputfile [lindex $argv 4]
-set tnum [lindex $argv 5]
-# args is the string "{ -len 20 -pad 0}", so we need to extract the
-# " -len 20 -pad 0" part.
-set args [lindex [lrange $argv 6 end] 0]
-
-set mydata "consumer data"
-
-# Open env
-set dbenv [berkdb_env -home $dir ]
-error_check_good db_env_create [is_valid_env $dbenv] TRUE
-
-# Figure out db opening command.
-set db_cmd [concat {berkdb_open -create -mode 0644 -queue -env}\
- $dbenv $args $file]
-
-# Invoke consumescript_produce or consumescript_consume based on $runtype
-if { $runtype == "PRODUCE" } {
- # Producers have nothing to log; make sure outputfile is null.
- error_check_good no_producer_outputfile $outputfile ""
- consumescript_produce $db_cmd $nitems $tnum $args
-} elseif { $runtype == "CONSUME" } {
- consumescript_consume $db_cmd $nitems $tnum $outputfile -consume $args
-} elseif { $runtype == "WAIT" } {
- consumescript_consume $db_cmd $nitems $tnum $outputfile -consume_wait \
- $args
-} else {
- error_check_good bad_args $runtype \
- "either PRODUCE, CONSUME, or WAIT"
-}
-error_check_good env_close [$dbenv close] 0
-exit
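Stripped of the harness plumbing, the producer and consumer sides of this script each reduce to one queue primitive. A self-contained sketch using the same fixed-length record settings the parent test passes in (q.db is an illustrative name):

    # Queue records are fixed length: 20 bytes, padded with NUL (-pad 0).
    set db [berkdb_open -create -mode 0644 -queue -len 20 -pad 0 q.db]
    set recno [$db put -append "consumer data"]   ;# producer: returns the new record number
    set ret [$db get -consume]                    ;# consumer: dequeue head; empty list if none
    if { [llength $ret] > 0 } {
        puts "consumed record [lindex [lindex $ret 0] 0]"
    }
    $db close

The WAIT variant differs only in using -consume_wait, which blocks until a record arrives instead of returning an empty list.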
diff --git a/storage/bdb/test/dbm.tcl b/storage/bdb/test/dbm.tcl
deleted file mode 100644
index 49a0f3e13d1..00000000000
--- a/storage/bdb/test/dbm.tcl
+++ /dev/null
@@ -1,128 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dbm.tcl,v 11.17 2004/01/28 03:36:26 bostic Exp $
-#
-# TEST dbm
-# TEST Historic DBM interface test. Use the first 1000 entries from the
-# TEST dictionary. Insert each with self as key and data; retrieve each.
-# TEST After all are entered, retrieve all; compare output to original.
-# TEST Then reopen the file, re-retrieve everything. Finally, delete
-# TEST everything.
-proc dbm { { nentries 1000 } } {
- source ./include.tcl
-
- puts "DBM interfaces test: $nentries"
-
- # Create the database and open the dictionary
- set testfile $testdir/dbmtest
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir NULL
-
- error_check_good dbminit [berkdb dbminit $testfile] 0
- set did [open $dict]
-
- set flags ""
- set txn ""
- set count 0
- set skippednullkey 0
-
- puts "\tDBM.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- # DBM can't handle zero-length keys
- if { [string length $str] == 0 } {
- set skippednullkey 1
- continue
- }
-
- set ret [berkdb store $str $str]
- error_check_good dbm_store $ret 0
-
- set d [berkdb fetch $str]
- error_check_good dbm_fetch $d $str
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tDBM.b: dump file"
- set oid [open $t1 w]
- for { set key [berkdb firstkey] } { $key != -1 } {\
- set key [berkdb nextkey $key] } {
- puts $oid $key
- set d [berkdb fetch $key]
- error_check_good dbm_refetch $d $key
- }
-
- # If we had to skip a zero-length key, juggle things to cover up
- # this fact in the dump.
- if { $skippednullkey == 1 } {
- puts $oid ""
- incr nentries 1
- }
-
- close $oid
-
- # Now compare the keys to see if they match the dictionary (or ints)
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
-
- error_check_good DBM:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tDBM.c: close, open, and dump file"
-
- # Now, reopen the file and run the last test again.
- error_check_good dbminit2 [berkdb dbminit $testfile] 0
- set oid [open $t1 w]
-
- for { set key [berkdb firstkey] } { $key != -1 } {\
- set key [berkdb nextkey $key] } {
- puts $oid $key
- set d [berkdb fetch $key]
- error_check_good dbm_refetch $d $key
- }
- if { $skippednullkey == 1 } {
- puts $oid ""
- }
- close $oid
-
- # Now compare the keys to see if they match the dictionary (or ints)
- filesort $t1 $t3
-
- error_check_good DBM:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and delete each entry
- puts "\tDBM.d: sequential scan and delete"
-
- error_check_good dbminit3 [berkdb dbminit $testfile] 0
- set oid [open $t1 w]
-
- for { set key [berkdb firstkey] } { $key != -1 } {\
- set key [berkdb nextkey $key] } {
- puts $oid $key
- set ret [berkdb delete $key]
- error_check_good dbm_delete $ret 0
- }
- if { $skippednullkey == 1 } {
- puts $oid ""
- }
- close $oid
-
- # Now compare the keys to see if they match the dictionary (or ints)
- filesort $t1 $t3
-
- error_check_good DBM:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- error_check_good "dbm_close" [berkdb dbmclose] 0
-}
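The berkdb dbm* commands above emulate the historic single-database DBM API, which is why no handle appears anywhere in the test. The whole call surface it exercises, in miniature (the file name is illustrative):

    berkdb dbminit ./dbmtest        ;# open (or create) the one active DBM file
    berkdb store hello world        ;# insert key/data
    puts [berkdb fetch hello]       ;# -> world
    set k [berkdb firstkey]         ;# iterate with firstkey/nextkey until -1
    berkdb delete hello
    berkdb dbmclose

DBM permits only one open database at a time, which is why the test simply re-initializes with dbminit between its dump, re-dump, and delete phases.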
diff --git a/storage/bdb/test/dbscript.tcl b/storage/bdb/test/dbscript.tcl
deleted file mode 100644
index 8ddcec09d35..00000000000
--- a/storage/bdb/test/dbscript.tcl
+++ /dev/null
@@ -1,357 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dbscript.tcl,v 11.16 2004/01/28 03:36:26 bostic Exp $
-#
-# Random db tester.
-# Usage: dbscript method file numops ncurs min_del max_add key_avg data_avg dups errpct
-# method: method (we pass this in so that fixed-length records work)
-# file: db file on which to operate
-# numops: number of operations to do
-# ncurs: number of cursors
-# min_del: minimum number of keys before you disable deletes.
-# max_add: maximum number of keys before you disable adds.
-# key_avg: average key size
-# data_avg: average data size
-# dups: 1 indicates dups allowed, 0 indicates no dups
-# errpct: What percent of operations should generate errors
-# (The random seed comes from $rand_init via berkdb srand; it is not
-# a command-line argument.)
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt"
-
-# Verify usage
-if { $argc != 10 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set method [lindex $argv 0]
-set file [lindex $argv 1]
-set numops [ lindex $argv 2 ]
-set ncurs [ lindex $argv 3 ]
-set min_del [ lindex $argv 4 ]
-set max_add [ lindex $argv 5 ]
-set key_avg [ lindex $argv 6 ]
-set data_avg [ lindex $argv 7 ]
-set dups [ lindex $argv 8 ]
-set errpct [ lindex $argv 9 ]
-
-berkdb srand $rand_init
-
-puts "Beginning execution for [pid]"
-puts "$file database"
-puts "$numops Operations"
-puts "$ncurs cursors"
-puts "$min_del keys before deletes allowed"
-puts "$max_add or fewer keys to add"
-puts "$key_avg average key length"
-puts "$data_avg average data length"
-if { $dups != 1 } {
- puts "No dups"
-} else {
- puts "Dups allowed"
-}
-puts "$errpct % Errors"
-
-flush stdout
-
-set db [berkdb_open $file]
-set cerr [catch {error_check_good dbopen [is_substr $db db] 1} cret]
-if {$cerr != 0} {
- puts $cret
- return
-}
-# set method [$db get_type]
-set record_based [is_record_based $method]
-
-# Initialize globals including data
-global nkeys
-global l_keys
-global a_keys
-
-set nkeys [db_init $db 1]
-puts "Initial number of keys: $nkeys"
-
-set pflags ""
-set gflags ""
-set txn ""
-
-# Open the cursors
-set curslist {}
-for { set i 0 } { $i < $ncurs } { incr i } {
- set dbc [$db cursor]
- set cerr [catch {error_check_good dbopen [is_substr $dbc $db.c] 1} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- set cerr [catch {error_check_bad cursor_create $dbc NULL} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- lappend curslist $dbc
-
-}
-
-# On each iteration we generate random keys and data and select a
-# get, put, or delete operation -- except that delete is disabled
-# while we hold fewer than min_del keys, and add is disabled once we
-# hold more than max_add. The Tcl global arrays a_keys and l_keys
-# track the key/data pairs, indexed by key and by integer position
-# respectively.
-set adds 0
-set puts 0
-set gets 0
-set dels 0
-set bad_adds 0
-set bad_puts 0
-set bad_gets 0
-set bad_dels 0
-
-for { set iter 0 } { $iter < $numops } { incr iter } {
- set op [pick_op $min_del $max_add $nkeys]
- set err [is_err $errpct]
-
-	# A 0 in the dups position of the switch key means duplicates
-	# are not allowed, so we exercise the regular operations; if
-	# dups is 1, we use cursor ops instead.
- switch $op$dups$err {
- add00 {
- incr adds
-
- set k [random_data $key_avg 1 a_keys $record_based]
- set data [random_data $data_avg 0 0]
- set data [chop_data $method $data]
- set ret [eval {$db put} $txn $pflags \
- {-nooverwrite $k $data}]
- set cerr [catch {error_check_good put $ret 0} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- newpair $k [pad_data $method $data]
- }
- add01 {
- incr bad_adds
- set k [random_key]
- set data [random_data $data_avg 0 0]
- set data [chop_data $method $data]
- set ret [eval {$db put} $txn $pflags \
- {-nooverwrite $k $data}]
- set cerr [catch {error_check_good put $ret 0} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- # Error case so no change to data state
- }
- add10 {
- incr adds
- set dbcinfo [random_cursor $curslist]
- set dbc [lindex $dbcinfo 0]
- if { [berkdb random_int 1 2] == 1 } {
- # Add a new key
- set k [random_data $key_avg 1 a_keys \
- $record_based]
- set data [random_data $data_avg 0 0]
- set data [chop_data $method $data]
- set ret [eval {$dbc put} $txn \
- {-keyfirst $k $data}]
- newpair $k [pad_data $method $data]
- } else {
- # Add a new duplicate
- set dbc [lindex $dbcinfo 0]
- set k [lindex $dbcinfo 1]
- set data [random_data $data_avg 0 0]
-
- set op [pick_cursput]
- set data [chop_data $method $data]
- set ret [eval {$dbc put} $txn {$op $k $data}]
- adddup $k [lindex $dbcinfo 2] $data
- }
- }
- add11 {
- # TODO
- incr bad_adds
- set ret 1
- }
- put00 {
- incr puts
- set k [random_key]
- set data [random_data $data_avg 0 0]
- set data [chop_data $method $data]
- set ret [eval {$db put} $txn {$k $data}]
- changepair $k [pad_data $method $data]
- }
- put01 {
- incr bad_puts
- set k [random_key]
- set data [random_data $data_avg 0 0]
- set data [chop_data $method $data]
- set ret [eval {$db put} $txn $pflags \
- {-nooverwrite $k $data}]
- set cerr [catch {error_check_good put $ret 0} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- # Error case so no change to data state
- }
- put10 {
- incr puts
- set dbcinfo [random_cursor $curslist]
- set dbc [lindex $dbcinfo 0]
- set k [lindex $dbcinfo 1]
- set data [random_data $data_avg 0 0]
- set data [chop_data $method $data]
-
- set ret [eval {$dbc put} $txn {-current $data}]
- changedup $k [lindex $dbcinfo 2] $data
- }
- put11 {
- incr bad_puts
- set k [random_key]
- set data [random_data $data_avg 0 0]
- set data [chop_data $method $data]
- set dbc [$db cursor]
- set ret [eval {$dbc put} $txn {-current $data}]
- set cerr [catch {error_check_good curs_close \
- [$dbc close] 0} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- # Error case so no change to data state
- }
- get00 {
- incr gets
- set k [random_key]
- set val [eval {$db get} $txn {$k}]
- set data [pad_data $method [lindex [lindex $val 0] 1]]
- if { $data == $a_keys($k) } {
- set ret 0
- } else {
- set ret "FAIL: Error got |$data| expected |$a_keys($k)|"
- }
- # Get command requires no state change
- }
- get01 {
- incr bad_gets
- set k [random_data $key_avg 1 a_keys $record_based]
- set ret [eval {$db get} $txn {$k}]
- # Error case so no change to data state
- }
- get10 {
- incr gets
- set dbcinfo [random_cursor $curslist]
-		if { [llength $dbcinfo] == 3 } {
-			set ret 0
-		} else {
-			set ret 0
-		}
- # Get command requires no state change
- }
- get11 {
- incr bad_gets
- set k [random_key]
- set dbc [$db cursor]
- if { [berkdb random_int 1 2] == 1 } {
- set dir -next
- } else {
- set dir -prev
- }
-		set ret [eval {$dbc get} $txn {$dir $k}]
- set cerr [catch {error_check_good curs_close \
- [$dbc close] 0} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- # Error and get case so no change to data state
- }
- del00 {
- incr dels
- set k [random_key]
- set ret [eval {$db del} $txn {$k}]
- rempair $k
- }
- del01 {
- incr bad_dels
- set k [random_data $key_avg 1 a_keys $record_based]
- set ret [eval {$db del} $txn {$k}]
- # Error case so no change to data state
- }
- del10 {
- incr dels
- set dbcinfo [random_cursor $curslist]
- set dbc [lindex $dbcinfo 0]
- set ret [eval {$dbc del} $txn]
-		remdup [lindex $dbcinfo 1] [lindex $dbcinfo 2]
- }
- del11 {
- incr bad_dels
- set c [$db cursor]
- set ret [eval {$c del} $txn]
- set cerr [catch {error_check_good curs_close \
- [$c close] 0} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- # Error case so no change to data state
- }
- }
- if { $err == 1 } {
- # Verify failure.
- set cerr [catch {error_check_good $op$dups$err:$k \
- [is_substr Error $ret] 1} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- } else {
- # Verify success
- set cerr [catch {error_check_good $op$dups$err:$k $ret 0} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
- }
-
- flush stdout
-}
-
-# Close cursors and file
-foreach i $curslist {
- set r [$i close]
- set cerr [catch {error_check_good cursor_close:$i $r 0} cret]
- if {$cerr != 0} {
- puts $cret
- return
- }
-}
-
-set r [$db close]
-set cerr [catch {error_check_good db_close:$db $r 0} cret]
-if {$cerr != 0} {
- puts $cret
- return
-}
-
-puts "[timestamp] [pid] Complete"
-puts "Successful ops: $adds adds $gets gets $puts puts $dels dels"
-puts "Error ops: $bad_adds adds $bad_gets gets $bad_puts puts $bad_dels dels"
-flush stdout
-
-filecheck $file $txn
-
-exit
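
The dispatch in dbscript concatenates the chosen operation, the dups flag, and the error flag into a single switch key, so "add01" means an add, no duplicates, expected to fail. A minimal sketch of that keying scheme (the values below are illustrative, not taken from a run):

    set op add
    set dups 0
    set err 1
    switch $op$dups$err {
        add01   { puts "exercise the expected-failure add path" }
        default { puts "some other combination: $op$dups$err" }
    }
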
diff --git a/storage/bdb/test/ddoyscript.tcl b/storage/bdb/test/ddoyscript.tcl
deleted file mode 100644
index 30e6c34e0bc..00000000000
--- a/storage/bdb/test/ddoyscript.tcl
+++ /dev/null
@@ -1,172 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: ddoyscript.tcl,v 11.8 2004/01/28 03:36:26 bostic Exp $
-#
-# Deadlock detector script tester.
-# Usage: ddoyscript dir lockerid numprocs oldoryoung myid
-# dir: DBHOME directory
-# lockerid: Lock id for this locker
-# numprocs: Total number of processes running
-# myid: id of this process --
-#	processes are created in the same order in which their
-#	lockerids were allocated, so we know the locker age
-#	relationship is isomorphic with the ordering of the myids.
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-set usage "ddoyscript dir lockerid numprocs oldoryoung"
-
-# Verify usage
-if { $argc != 5 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set dir [lindex $argv 0]
-set lockerid [ lindex $argv 1 ]
-set numprocs [ lindex $argv 2 ]
-set old_or_young [lindex $argv 3]
-set myid [lindex $argv 4]
-
-set myenv [berkdb_env -lock -home $dir -create -mode 0644]
-error_check_bad lock_open $myenv NULL
-error_check_good lock_open [is_substr $myenv "env"] 1
-
-# There are two cases here -- oldest/youngest or a ring locker.
-
-if { $myid == 0 || $myid == [expr $numprocs - 1] } {
- set waitobj NULL
- set ret 0
-
- if { $myid == 0 } {
- set objid 2
- if { $old_or_young == "o" } {
- set waitobj [expr $numprocs - 1]
- }
- } else {
- if { $old_or_young == "y" } {
- set waitobj 0
- }
- set objid 4
- }
-
- # Acquire own read lock
- if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} {
- puts $errorInfo
- } else {
- error_check_good selfget:$objid [is_substr $selflock $myenv] 1
- }
-
- # Acquire read lock
- if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} {
- puts $errorInfo
- } else {
- error_check_good lockget:$objid [is_substr $lock1 $myenv] 1
- }
-
- tclsleep 10
-
- if { $waitobj == "NULL" } {
- # Sleep for a good long while
- tclsleep 90
- } else {
- # Acquire write lock
- if {[catch {$myenv lock_get write $lockerid $waitobj} lock2]
- != 0} {
- puts $errorInfo
- set ret ERROR
- } else {
- error_check_good lockget:$waitobj \
- [is_substr $lock2 $myenv] 1
-
- # Now release it
- if {[catch {$lock2 put} err] != 0} {
- puts $errorInfo
- set ret ERROR
- } else {
- error_check_good lockput:oy:$objid $err 0
- }
- }
-
- }
-
- # Release self lock
- if {[catch {$selflock put} err] != 0} {
- puts $errorInfo
- if { $ret == 0 } {
- set ret ERROR
- }
- } else {
- error_check_good selfput:oy:$myid $err 0
- if { $ret == 0 } {
- set ret 1
- }
- }
-
- # Release first lock
- if {[catch {$lock1 put} err] != 0} {
- puts $errorInfo
- if { $ret == 0 } {
- set ret ERROR
- }
- } else {
- error_check_good lockput:oy:$objid $err 0
- if { $ret == 0 } {
- set ret 1
- }
- }
-
-} else {
- # Make sure that we succeed if we're locking the same object as
- # oldest or youngest.
- if { [expr $myid % 2] == 0 } {
- set mode read
- } else {
- set mode write
- }
- # Obtain first lock (should always succeed).
- if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} {
- puts $errorInfo
- } else {
- error_check_good lockget:$myid [is_substr $lock1 $myenv] 1
- }
-
- tclsleep 30
-
- set nextobj [expr $myid + 1]
- if { $nextobj == [expr $numprocs - 1] } {
- set nextobj 1
- }
-
- set ret 1
- if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} {
- if {[string match "*DEADLOCK*" $lock2] == 1} {
- set ret DEADLOCK
- } else {
- set ret ERROR
- }
- } else {
- error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
- }
-
- # Now release the first lock
- error_check_good lockput:$lock1 [$lock1 put] 0
-
- if {$ret == 1} {
- error_check_bad lockget:$nextobj $lock2 NULL
- error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
- error_check_good lockput:$lock2 [$lock2 put] 0
- }
-}
-
-puts $ret
-error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
-error_check_good envclose [$myenv close] 0
-exit
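
The locking calls ddoyscript leans on, reduced to a minimal runnable sketch against an already-open locking environment $myenv (every command here appears in the script above; "myobject" is an arbitrary object name):

    set lockerid [$myenv lock_id]
    set lock [$myenv lock_get read $lockerid myobject]
    error_check_good lockget [is_substr $lock $myenv] 1
    error_check_good lockput [$lock put] 0
    error_check_good idfree [$myenv lock_id_free $lockerid] 0
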
diff --git a/storage/bdb/test/ddscript.tcl b/storage/bdb/test/ddscript.tcl
deleted file mode 100644
index 173cb2a69b4..00000000000
--- a/storage/bdb/test/ddscript.tcl
+++ /dev/null
@@ -1,44 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: ddscript.tcl,v 11.15 2004/03/18 20:58:14 carol Exp $
-#
-# Deadlock detector script tester.
-# Usage: ddscript dir test lockerid objid numprocs
-# dir: DBHOME directory
-# test: Which test to run
-# lockerid: Lock id for this locker
-# objid: Object id to lock.
-# numprocs: Total number of processes running
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-set usage "ddscript dir test lockerid objid numprocs"
-
-# Verify usage
-if { $argc != 5 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set dir [lindex $argv 0]
-set test [ lindex $argv 1 ]
-set lockerid [ lindex $argv 2 ]
-set objid [ lindex $argv 3 ]
-set numprocs [ lindex $argv 4 ]
-
-set myenv [berkdb_env -lock -home $dir -create -mode 0644 ]
-error_check_bad lock_open $myenv NULL
-error_check_good lock_open [is_substr $myenv "env"] 1
-
-puts [eval $test $myenv $lockerid $objid $numprocs]
-
-error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
-error_check_good envclose [$myenv close] 0
-
-exit
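
Note that ddscript evaluates its test argument as a Tcl command, so the "ring" and "clump" names the dead00* callers pass must resolve to procs supplied by the shared test utilities, which are not part of this diff. A hedged sketch of a proc with a compatible signature and result convention -- the real implementations differ:

    proc clump { env lockerid objid numprocs } {
        # All lockers contend for one object; return DEADLOCK if this
        # locker was chosen as the victim, 1 if it ran clean.
        return 1
    }
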
diff --git a/storage/bdb/test/dead001.tcl b/storage/bdb/test/dead001.tcl
deleted file mode 100644
index fca094bf17c..00000000000
--- a/storage/bdb/test/dead001.tcl
+++ /dev/null
@@ -1,88 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dead001.tcl,v 11.37 2004/01/28 03:36:26 bostic Exp $
-#
-# TEST dead001
-# TEST Use two different configurations to test deadlock detection among a
-# TEST variable number of processes. One configuration has the processes
-# TEST deadlocked in a ring. The other has the processes all deadlocked on
-# TEST a single resource.
-proc dead001 { { procs "2 4 10" } {tests "ring clump" } \
- {timeout 0} {tnum "001"} } {
- source ./include.tcl
- global lock_curid
- global lock_maxid
-
- puts "Dead$tnum: Deadlock detector tests"
-
- env_cleanup $testdir
-
- # Create the environment.
- puts "\tDead$tnum.a: creating environment"
- set env [berkdb_env -create \
- -mode 0644 -lock -lock_timeout $timeout -home $testdir]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-
- foreach t $tests {
- foreach n $procs {
- if {$timeout == 0 } {
- set dpid [exec $util_path/db_deadlock -vw \
- -h $testdir >& $testdir/dd.out &]
- } else {
- set dpid [exec $util_path/db_deadlock -vw \
- -ae -h $testdir >& $testdir/dd.out &]
- }
-
- sentinel_init
- set pidlist ""
- set ret [$env lock_id_set $lock_curid $lock_maxid]
- error_check_good lock_id_set $ret 0
-
- # Fire off the tests
- puts "\tDead$tnum: $n procs of test $t"
- for { set i 0 } { $i < $n } { incr i } {
- set locker [$env lock_id]
- puts "$tclsh_path $test_path/wrap.tcl \
- $testdir/dead$tnum.log.$i \
- ddscript.tcl $testdir $t $locker $i $n"
- set p [exec $tclsh_path \
- $test_path/wrap.tcl \
- ddscript.tcl $testdir/dead$tnum.log.$i \
- $testdir $t $locker $i $n &]
- lappend pidlist $p
- }
- watch_procs $pidlist 5
-
- # Now check output
- set dead 0
- set clean 0
- set other 0
- for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead$tnum.log.$i]
- while { [gets $did val] != -1 } {
- switch $val {
- DEADLOCK { incr dead }
- 1 { incr clean }
- default { incr other }
- }
- }
- close $did
- }
- tclkill $dpid
- puts "\tDead$tnum: dead check..."
- dead_check $t $n $timeout $dead $clean $other
- }
- }
-
- # Windows needs files closed before deleting files, so pause a little
- tclsleep 3
- fileremove -f $testdir/dd.out
- # Remove log files
- for { set i 0 } { $i < $n } { incr i } {
- fileremove -f $testdir/dead$tnum.log.$i
- }
- error_check_good lock_env:close [$env close] 0
-}
diff --git a/storage/bdb/test/dead002.tcl b/storage/bdb/test/dead002.tcl
deleted file mode 100644
index 7493216d22d..00000000000
--- a/storage/bdb/test/dead002.tcl
+++ /dev/null
@@ -1,82 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dead002.tcl,v 11.30 2004/07/07 17:05:55 carol Exp $
-#
-# TEST dead002
-# TEST Same test as dead001, but use "detect on every collision" instead
-# TEST of separate deadlock detector.
-proc dead002 { { procs "2 4 10" } {tests "ring clump" } \
- {timeout 0} {tnum 002} } {
- source ./include.tcl
-
- puts "Dead$tnum: Deadlock detector tests (detect on every collision)"
-
- env_cleanup $testdir
-
- # Create the environment.
- puts "\tDead$tnum.a: creating environment"
- set lmode "default"
- if { $timeout != 0 } {
- set lmode "expire"
- }
- set env [berkdb_env \
- -create -mode 0644 -home $testdir \
- -lock -lock_timeout $timeout -lock_detect $lmode]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-
- foreach t $tests {
- foreach n $procs {
- set pidlist ""
- sentinel_init
-
- # Fire off the tests
- puts "\tDead$tnum: $n procs of test $t"
- for { set i 0 } { $i < $n } { incr i } {
- set locker [$env lock_id]
- puts "$tclsh_path $test_path/wrap.tcl \
- $testdir/dead$tnum.log.$i \
- ddscript.tcl $testdir $t $locker $i $n"
- set p [exec $tclsh_path \
- $test_path/wrap.tcl \
- ddscript.tcl $testdir/dead$tnum.log.$i \
- $testdir $t $locker $i $n &]
- lappend pidlist $p
- # If we're running with timeouts, pause so that
- # locks will have a chance to time out.
- if { $timeout != 0 } {
- tclsleep 2
- }
- }
- watch_procs $pidlist 5
-
- # Now check output
- set dead 0
- set clean 0
- set other 0
- for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead$tnum.log.$i]
- while { [gets $did val] != -1 } {
- switch $val {
- DEADLOCK { incr dead }
- 1 { incr clean }
- default { incr other }
- }
- }
- close $did
- }
-
- puts "\tDead$tnum: dead check ..."
- dead_check $t $n $timeout $dead $clean $other
- }
- }
-
- fileremove -f $testdir/dd.out
- # Remove log files
- for { set i 0 } { $i < $n } { incr i } {
- fileremove -f $testdir/dead$tnum.log.$i
- }
- error_check_good lock_env:close [$env close] 0
-}
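
Configuring "detect on every collision" is just the -lock_detect flag at environment open; dead002 picks between the default and expire policies, and dead003 below passes oldest or youngest the same way. A minimal sketch:

    set env [berkdb_env -create -mode 0644 -home $testdir \
        -lock -lock_detect default]
    error_check_good env_valid [is_valid_env $env] TRUE
    error_check_good env_close [$env close] 0
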
diff --git a/storage/bdb/test/dead003.tcl b/storage/bdb/test/dead003.tcl
deleted file mode 100644
index 2a74ce4237b..00000000000
--- a/storage/bdb/test/dead003.tcl
+++ /dev/null
@@ -1,99 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dead003.tcl,v 1.20 2004/01/28 03:36:26 bostic Exp $
-#
-# TEST dead003
-# TEST
-# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
-# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
-proc dead003 { {procs "2 4 10"} {tests "ring clump"} {tnum "003"} } {
- source ./include.tcl
- global lock_curid
- global lock_maxid
-
- set detects { oldest youngest }
- puts "Dead$tnum: Deadlock detector tests: $detects"
-
- # Create the environment.
- foreach d $detects {
- env_cleanup $testdir
- puts "\tDead$tnum.a: creating environment for $d"
- set env [berkdb_env \
- -create -mode 0644 -home $testdir -lock -lock_detect $d]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-
- foreach t $tests {
- foreach n $procs {
- set pidlist ""
- sentinel_init
- set ret [$env lock_id_set \
- $lock_curid $lock_maxid]
- error_check_good lock_id_set $ret 0
-
- # Fire off the tests
- puts "\tDead$tnum: $n procs of test $t"
- for { set i 0 } { $i < $n } { incr i } {
- set locker [$env lock_id]
-				puts "$tclsh_path $test_path/wrap.tcl \
-				    ddscript.tcl \
-				    $testdir/dead$tnum.log.$i $testdir \
-				    $t $locker $i $n"
- set p [exec $tclsh_path \
- $test_path/wrap.tcl \
- ddscript.tcl \
- $testdir/dead$tnum.log.$i $testdir \
- $t $locker $i $n &]
- lappend pidlist $p
- }
- watch_procs $pidlist 5
-
- # Now check output
- set dead 0
- set clean 0
- set other 0
- for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead$tnum.log.$i]
- while { [gets $did val] != -1 } {
- switch $val {
- DEADLOCK { incr dead }
- 1 { incr clean }
- default { incr other }
- }
- }
- close $did
- }
- puts "\tDead$tnum: dead check..."
- dead_check $t $n 0 $dead $clean $other
- #
- # If we get here we know we have the
- # correct number of dead/clean procs, as
-			# checked by dead_check above. Now verify
-			# that the right process was the victim.
- puts "\tDead$tnum: Verify $d locks were aborted"
- set l ""
- if { $d == "oldest" } {
- set l [expr $n - 1]
- }
- if { $d == "youngest" } {
- set l 0
- }
- set did [open $testdir/dead$tnum.log.$l]
- while { [gets $did val] != -1 } {
- error_check_good check_abort \
- $val 1
- }
- close $did
- }
- }
-
- fileremove -f $testdir/dd.out
- # Remove log files
- for { set i 0 } { $i < $n } { incr i } {
- fileremove -f $testdir/dead$tnum.log.$i
- }
- error_check_good lock_env:close [$env close] 0
- }
-}
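
The survivor index dead003 computes follows directly from the age rule (lockers are allocated in process-creation order, so smaller ids are older): under the oldest policy the last-created process must run clean, and under youngest the first-created one must. As a sketch, where $d is "oldest" or "youngest" and $n is the process count:

    if { $d == "oldest" } {
        set survivor [expr $n - 1]
    } else {
        set survivor 0
    }
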
diff --git a/storage/bdb/test/dead004.tcl b/storage/bdb/test/dead004.tcl
deleted file mode 100644
index 4f33dcd75a9..00000000000
--- a/storage/bdb/test/dead004.tcl
+++ /dev/null
@@ -1,108 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dead004.tcl,v 11.15 2004/01/28 03:36:27 bostic Exp $
-#
-# Deadlock Test 4.
-# This test is designed to make sure that we handle youngest and oldest
-# deadlock detection even when the youngest and oldest transactions in the
-# system are not involved in the deadlock (that is, we want to abort the
-# youngest/oldest which is actually involved in the deadlock, not simply
-# the youngest/oldest in the system).
-# Since this is used for transaction systems, the locker ID is what we
-# use to identify age (smaller number is older).
-#
-# The set up is that we have a total of 6 processes. The oldest (locker 0)
-# and the youngest (locker 5) simply acquire a lock, hold it for a long time
-# and then release it. The rest form a ring, obtaining lock N and requesting
-# a lock on (N+1) mod 4. The deadlock detector ought to pick locker 1 or 4
-# to abort and not 0 or 5.
-
-proc dead004 { {tnum "004"} } {
- source ./include.tcl
- global lock_curid
- global lock_maxid
-
- foreach a { o y } {
- puts "Dead$tnum: Deadlock detector test -a $a"
- env_cleanup $testdir
-
- # Create the environment.
- puts "\tDead$tnum.a: creating environment"
- set env [berkdb_env -create -mode 0644 -lock -home $testdir]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-
- set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \
- -h $testdir >& $testdir/dd.out &]
-
- set procs 6
-
- foreach n $procs {
-
- sentinel_init
- set pidlist ""
- set ret [$env lock_id_set $lock_curid $lock_maxid]
- error_check_good lock_id_set $ret 0
-
- # Fire off the tests
- puts "\tDead$tnum: $n procs"
- for { set i 0 } { $i < $n } { incr i } {
- set locker [$env lock_id]
- puts "$tclsh_path $test_path/wrap.tcl \
- $testdir/dead$tnum.log.$i \
- ddoyscript.tcl $testdir $locker $n $a $i"
- set p [exec $tclsh_path \
- $test_path/wrap.tcl \
- ddoyscript.tcl $testdir/dead$tnum.log.$i \
- $testdir $locker $n $a $i &]
- lappend pidlist $p
- }
- watch_procs $pidlist 5
-
- }
- # Now check output
- set dead 0
- set clean 0
- set other 0
- for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead$tnum.log.$i]
- while { [gets $did val] != -1 } {
- switch $val {
- DEADLOCK { incr dead }
- 1 { incr clean }
- default { incr other }
- }
- }
- close $did
- }
- tclkill $dpid
-
- puts "\tDead$tnum: dead check..."
- dead_check oldyoung $n 0 $dead $clean $other
-
-		# Now verify that neither the oldest nor the
-		# youngest was the deadlock victim.
- set did [open $testdir/dead$tnum.log.0]
-		error_check_bad file:old [gets $did val] -1
-		error_check_good read:old $val 1
- close $did
-
- set did [open $testdir/dead$tnum.log.[expr $procs - 1]]
-		error_check_bad file:young [gets $did val] -1
-		error_check_good read:young $val 1
- close $did
-
- # Windows needs files closed before deleting files,
- # so pause a little
- tclsleep 2
- fileremove -f $testdir/dd.out
-
- # Remove log files
- for { set i 0 } { $i < $n } { incr i } {
- fileremove -f $testdir/dead$tnum.log.$i
- }
- error_check_good lock_env:close [$env close] 0
- }
-}
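
Restating the topology from dead004's header as runnable Tcl (the wrap rule mirrors ddoyscript's middle-locker branch): with six lockers, 0 and 5 simply hold their own locks while 1 through 4 chase each other in a ring.

    set numprocs 6
    for { set myid 1 } { $myid < [expr $numprocs - 1] } { incr myid } {
        set nextobj [expr $myid + 1]
        if { $nextobj == [expr $numprocs - 1] } {
            set nextobj 1
        }
        puts "locker $myid blocks on object $nextobj"
    }
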
diff --git a/storage/bdb/test/dead005.tcl b/storage/bdb/test/dead005.tcl
deleted file mode 100644
index 78e9ce838d7..00000000000
--- a/storage/bdb/test/dead005.tcl
+++ /dev/null
@@ -1,89 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dead005.tcl,v 11.15 2004/03/17 15:17:17 bostic Exp $
-#
-# Deadlock Test 5.
-# Test out the minlocks, maxlocks, and minwrites options
-# to the deadlock detector.
-proc dead005 { { procs "4 6 10" } \
- {tests "maxlocks maxwrites minlocks minwrites" } { tnum "005" } } {
- source ./include.tcl
-
- foreach t $tests {
- puts "Dead$tnum.$t: deadlock detection tests"
- env_cleanup $testdir
-
- # Create the environment.
- set env [berkdb_env -create -mode 0644 -lock -home $testdir]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-		switch $t {
-			maxlocks { set to m }
-			maxwrites { set to W }
-			minlocks { set to n }
-			minwrites { set to w }
-		}
- foreach n $procs {
- set dpid [exec $util_path/db_deadlock -vw -h $testdir \
- -a $to >& $testdir/dd.out &]
- sentinel_init
- set pidlist ""
-
- # Fire off the tests
- puts "\tDead$tnum: $t test with $n procs"
- for { set i 0 } { $i < $n } { incr i } {
- set locker [$env lock_id]
- puts "$tclsh_path $test_path/wrap.tcl \
- $testdir/dead$tnum.log.$i \
- ddscript.tcl $testdir $t $locker $i $n"
- set p [exec $tclsh_path \
- $test_path/wrap.tcl \
- ddscript.tcl $testdir/dead$tnum.log.$i \
- $testdir $t $locker $i $n &]
- lappend pidlist $p
- }
- watch_procs $pidlist 5
-
- # Now check output
- set dead 0
- set clean 0
- set other 0
- for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead$tnum.log.$i]
- while { [gets $did val] != -1 } {
- switch $val {
- DEADLOCK { incr dead }
- 1 { incr clean }
- default { incr other }
- }
- }
- close $did
- }
- tclkill $dpid
- puts "\tDead$tnum: dead check..."
- dead_check $t $n 0 $dead $clean $other
- # Now verify that the correct participant
- # got deadlocked.
- switch $t {
- maxlocks {set f [expr $n - 1]}
- maxwrites {set f 2}
- minlocks {set f 0}
- minwrites {set f 1}
- }
- set did [open $testdir/dead$tnum.log.$f]
- error_check_bad file:$t [gets $did val] -1
- error_check_good read($f):$t $val DEADLOCK
- close $did
- }
- error_check_good lock_env:close [$env close] 0
- # Windows needs files closed before deleting them, so pause
- tclsleep 2
- fileremove -f $testdir/dd.out
- # Remove log files
- for { set i 0 } { $i < $n } { incr i } {
-			fileremove -f $testdir/dead$tnum.log.$i
- }
- }
-}
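
dead005's policy names map onto db_deadlock's -a option letters as set up in the switch above; collected here as a lookup for reference:

    array set to_flag {maxlocks m maxwrites W minlocks n minwrites w}
    puts "db_deadlock -a $to_flag(minwrites)"   ;# prints: db_deadlock -a w
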
diff --git a/storage/bdb/test/dead006.tcl b/storage/bdb/test/dead006.tcl
deleted file mode 100644
index 4d80af41894..00000000000
--- a/storage/bdb/test/dead006.tcl
+++ /dev/null
@@ -1,16 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dead006.tcl,v 1.6 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST dead006
-# TEST use timeouts rather than the normal dd algorithm.
-proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
- {timeout 1000} {tnum 006} } {
- source ./include.tcl
-
- dead001 $procs $tests $timeout $tnum
- dead002 $procs $tests $timeout $tnum
-}
diff --git a/storage/bdb/test/dead007.tcl b/storage/bdb/test/dead007.tcl
deleted file mode 100644
index e9aefa9c0ba..00000000000
--- a/storage/bdb/test/dead007.tcl
+++ /dev/null
@@ -1,36 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: dead007.tcl,v 1.6 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST dead007
-# TEST Tests for locker and txn id wraparound.
-proc dead007 { {tnum "007"} } {
- source ./include.tcl
- global lock_curid
- global lock_maxid
-
- set save_curid $lock_curid
- set save_maxid $lock_maxid
- puts "Dead$tnum.a -- wrap around"
- set lock_curid [expr $lock_maxid - 2]
- dead001 "2 10" "ring clump" "0" $tnum
- ## Oldest/youngest breaks when the id wraps
- # dead003 "4 10"
- dead004 $tnum
-
- puts "Dead$tnum.b -- extend space"
- set lock_maxid [expr $lock_maxid - 3]
- set lock_curid [expr $lock_maxid - 1]
- dead001 "4 10" "ring clump" "0" $tnum
- ## Oldest/youngest breaks when the id wraps
- # dead003 "10"
- dead004 $tnum
-
- set lock_curid $save_curid
- set lock_maxid $save_maxid
- # Return the empty string so we don't return lock_maxid.
- return ""
-}
diff --git a/storage/bdb/test/env001.tcl b/storage/bdb/test/env001.tcl
deleted file mode 100644
index 4e2c070e579..00000000000
--- a/storage/bdb/test/env001.tcl
+++ /dev/null
@@ -1,154 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env001.tcl,v 11.28 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST env001
-# TEST Test of env remove interface (formerly env_remove).
-proc env001 { } {
- global errorInfo
- global errorCode
-
- source ./include.tcl
-
- set testfile $testdir/env.db
- set t1 $testdir/t1
-
- puts "Env001: Test of environment remove interface."
- env_cleanup $testdir
-
-	# Trying to open without the create flag should fail.
- puts "\tEnv001.a: Open without create (should fail)."
- catch {set env [berkdb_env_noerr -home $testdir]} ret
- error_check_good env:fail [is_substr $ret "no such file"] 1
-
- # Now try opening with create
- puts "\tEnv001.b: Open with create."
- set env [berkdb_env -create -mode 0644 -home $testdir]
- error_check_bad env:$testdir $env NULL
- error_check_good env:$testdir [is_substr $env "env"] 1
-
- # Make sure that close works.
- puts "\tEnv001.c: Verify close."
- error_check_good env:close:$env [$env close] 0
-
- # Make sure we can reopen -- this doesn't work on Windows
- # because if there is only one opener, the region disappears
- # when it is closed. We can't do a second opener, because
- # that will fail on HP-UX.
- puts "\tEnv001.d: Remove on closed environments."
- if { $is_windows_test != 1 } {
- puts "\t\tEnv001.d.1: Verify re-open."
- set env [berkdb_env -home $testdir]
- error_check_bad env:$testdir $env NULL
- error_check_good env:$testdir [is_substr $env "env"] 1
-
- # remove environment
- puts "\t\tEnv001.d.2: Close environment."
- error_check_good env:close [$env close] 0
- puts "\t\tEnv001.d.3: Try remove with force (should succeed)."
- error_check_good \
- envremove [berkdb envremove -force -home $testdir] 0
- }
-
- if { $is_windows_test != 1 && $is_hp_test != 1 } {
- puts "\tEnv001.e: Remove on open environments."
- puts "\t\tEnv001.e.1: Env is open by single proc,\
- remove no force."
- set env [berkdb_env -create -mode 0644 -home $testdir]
- error_check_bad env:$testdir $env NULL
- error_check_good env:$testdir [is_substr $env "env"] 1
- set stat [catch {berkdb envremove -home $testdir} ret]
- error_check_good env:remove $stat 1
- error_check_good env:close [$env close] 0
- }
-
- puts \
- "\t\tEnv001.e.2: Env is open by single proc, remove with force."
- # Now that envremove doesn't do a close, this won't work on Windows.
- if { $is_windows_test != 1 && $is_hp_test != 1} {
- set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
- error_check_bad env:$testdir $env NULL
- error_check_good env:$testdir [is_substr $env "env"] 1
- set stat [catch {berkdb envremove -force -home $testdir} ret]
- error_check_good env:remove(force) $ret 0
- #
- # Even though the underlying env is gone, we need to close
- # the handle.
- #
- set stat [catch {$env close} ret]
- error_check_bad env:close_after_remove $stat 0
- error_check_good env:close_after_remove \
- [is_substr $ret "recovery"] 1
- }
-
- puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
- # should fail
- set env [berkdb_env -create -mode 0644 -home $testdir]
- error_check_bad env:$testdir $env NULL
- error_check_good env:$testdir [is_substr $env "env"] 1
-
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
-
- set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"]
- error_check_good remote:env_open [is_valid_env $remote_env] TRUE
- # First close our env, but leave remote open
- error_check_good env:close [$env close] 0
- catch {berkdb envremove -home $testdir} ret
- error_check_good envremove:2procs:noforce [is_substr $errorCode EBUSY] 1
- #
-	# Even though the remove failed, we are done with the
-	# environment, so close the handle held by the remote process.
- set remote_close [send_cmd $f1 "$remote_env close"]
- error_check_good remote_close $remote_close 0
-
- # exit remote process
- set err [catch { close $f1 } result]
- error_check_good close_remote_process $err 0
-
- puts "\t\tEnv001.e.4: Env is open by 2 procs, remove with force."
-	# You cannot do this on Windows because you cannot remove files
-	# that are open, so we skip this test for Windows. On UNIX, it
-	# should succeed.
- if { $is_windows_test != 1 && $is_hp_test != 1 } {
- set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
- error_check_bad env:$testdir $env NULL
- error_check_good env:$testdir [is_substr $env "env"] 1
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
-
- set remote_env [send_cmd $f1 "berkdb_env -home $testdir"]
- error_check_good remote:env_open [is_valid_env $remote_env] TRUE
-
- catch {berkdb envremove -force -home $testdir} ret
- error_check_good envremove:2procs:force $ret 0
- #
- # We still need to close our handle.
- #
- set stat [catch {$env close} ret]
- error_check_bad env:close_after_error $stat 0
- error_check_good env:close_after_error \
- [is_substr $ret recovery] 1
-
- # Close down remote process
- set err [catch { close $f1 } result]
- error_check_good close_remote_process $err 0
- }
-
- # Try opening in a different dir
- puts "\tEnv001.f: Try opening env in another directory."
- if { [file exists $testdir/NEWDIR] != 1 } {
- file mkdir $testdir/NEWDIR
- }
- set eflags "-create -home $testdir/NEWDIR -mode 0644"
- set env [eval {berkdb_env} $eflags]
- error_check_bad env:open $env NULL
- error_check_good env:close [$env close] 0
- error_check_good berkdb:envremove \
- [berkdb envremove -home $testdir/NEWDIR] 0
-
- puts "\tEnv001 complete."
-}
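
The remove interface env001 exercises, in its simplest runnable form -- create an environment, close the handle, then remove with force:

    set env [berkdb_env -create -mode 0644 -home $testdir]
    error_check_good env_close [$env close] 0
    error_check_good env_remove \
        [berkdb envremove -force -home $testdir] 0
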
diff --git a/storage/bdb/test/env002.tcl b/storage/bdb/test/env002.tcl
deleted file mode 100644
index 70f573c2980..00000000000
--- a/storage/bdb/test/env002.tcl
+++ /dev/null
@@ -1,156 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env002.tcl,v 11.17 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST env002
-# TEST Test of DB_LOG_DIR and env name resolution.
-# TEST With an environment path specified using -home, and then again
-# TEST with it specified by the environment variable DB_HOME:
-# TEST 1) Make sure that the set_lg_dir option is respected
-# TEST a) as a relative pathname.
-# TEST b) as an absolute pathname.
-# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected,
-# TEST again as relative and absolute pathnames.
-# TEST 3) Make sure that if -both- db_config and a file are present,
-# TEST only the file is respected (see doc/env/naming.html).
-proc env002 { } {
- # env002 is essentially just a small driver that runs
- # env002_body--formerly the entire test--twice; once, it
- # supplies a "home" argument to use with environment opens,
- # and the second time it sets DB_HOME instead.
- # Note that env002_body itself calls env002_run_test to run
- # the body of the actual test and check for the presence
- # of logs. The nesting, I hope, makes this test's structure simpler.
-
- global env
- source ./include.tcl
-
- puts "Env002: set_lg_dir test."
-
- puts "\tEnv002: Running with -home argument to berkdb_env."
- env002_body "-home $testdir"
-
- puts "\tEnv002: Running with environment variable DB_HOME set."
- set env(DB_HOME) $testdir
- env002_body "-use_environ"
-
- unset env(DB_HOME)
-
- puts "\tEnv002: Running with both DB_HOME and -home set."
- # Should respect -only- -home, so we give it a bogus
- # environment variable setting.
- set env(DB_HOME) $testdir/bogus_home
- env002_body "-use_environ -home $testdir"
- unset env(DB_HOME)
-
-}
-
-proc env002_body { home_arg } {
- source ./include.tcl
-
- env_cleanup $testdir
- set logdir "logs_in_here"
-
- file mkdir $testdir/$logdir
-
- # Set up full path to $logdir for when we test absolute paths.
- set curdir [pwd]
- cd $testdir/$logdir
- set fulllogdir [pwd]
- cd $curdir
-
- env002_make_config $logdir
-
- # Run the meat of the test.
- env002_run_test a 1 "relative path, config file" $home_arg \
- $testdir/$logdir
-
- env_cleanup $testdir
-
- file mkdir $fulllogdir
- env002_make_config $fulllogdir
-
- # Run the test again
- env002_run_test a 2 "absolute path, config file" $home_arg \
- $fulllogdir
-
- env_cleanup $testdir
-
- # Now we try without a config file, but instead with db_config
- # relative paths
- file mkdir $testdir/$logdir
- env002_run_test b 1 "relative path, db_config" "$home_arg \
- -log_dir $logdir -data_dir ." \
- $testdir/$logdir
-
- env_cleanup $testdir
-
- # absolute
- file mkdir $fulllogdir
- env002_run_test b 2 "absolute path, db_config" "$home_arg \
- -log_dir $fulllogdir -data_dir ." \
- $fulllogdir
-
- env_cleanup $testdir
-
-	# Now, set db_config -and- have a DB_CONFIG file, and make
-	# sure only the latter is honored.
-
- file mkdir $testdir/$logdir
- env002_make_config $logdir
-
- # note that we supply a -nonexistent- log dir to db_config
- env002_run_test c 1 "relative path, both db_config and file" \
- "$home_arg -log_dir $testdir/bogus \
- -data_dir ." $testdir/$logdir
- env_cleanup $testdir
-
- file mkdir $fulllogdir
- env002_make_config $fulllogdir
-
- # note that we supply a -nonexistent- log dir to db_config
-	env002_run_test c 2 "absolute path, both db_config and file" \
- "$home_arg -log_dir $fulllogdir/bogus \
- -data_dir ." $fulllogdir
-}
-
-proc env002_run_test { major minor msg env_args log_path} {
- global testdir
- set testfile "env002.db"
-
- puts "\t\tEnv002.$major.$minor: $msg"
-
- # Create an environment, with logging, and scribble some
- # stuff in a [btree] database in it.
- # puts [concat {berkdb_env -create -log -private} $env_args]
- set dbenv [eval {berkdb_env -create -log -private} $env_args]
- error_check_good env_open [is_valid_env $dbenv] TRUE
- set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set key "some_key"
- set data "some_data"
-
- error_check_good db_put \
- [$db put $key [chop_data btree $data]] 0
-
- error_check_good db_close [$db close] 0
- error_check_good env_close [$dbenv close] 0
-
- # Now make sure the log file is where we want it to be.
- error_check_good db_exists [file exists $testdir/$testfile] 1
- error_check_good log_exists \
- [file exists $log_path/log.0000000001] 1
-}
-
-proc env002_make_config { logdir } {
- global testdir
-
- set cid [open $testdir/DB_CONFIG w]
- puts $cid "set_data_dir ."
- puts $cid "set_lg_dir $logdir"
- close $cid
-}
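
The precedence rule env002's case (c) pins down: when both a DB_CONFIG file and a berkdb_env flag name a log directory, only the file is honored. A sketch, assuming DB_CONFIG in the home directory says "set_lg_dir logs_in_here":

    # The -log_dir flag below is deliberately bogus; DB_CONFIG wins,
    # so log.0000000001 appears under $testdir/logs_in_here.
    set dbenv [berkdb_env -create -log -private \
        -home $testdir -log_dir $testdir/bogus]
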
diff --git a/storage/bdb/test/env003.tcl b/storage/bdb/test/env003.tcl
deleted file mode 100644
index 247fcd3c9a8..00000000000
--- a/storage/bdb/test/env003.tcl
+++ /dev/null
@@ -1,149 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env003.tcl,v 11.23 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST env003
-# TEST Test DB_TMP_DIR and env name resolution
-# TEST With an environment path specified using -home, and then again
-# TEST with it specified by the environment variable DB_HOME:
-# TEST 1) Make sure that the DB_TMP_DIR config file option is respected
-# TEST a) as a relative pathname.
-# TEST b) as an absolute pathname.
-# TEST 2) Make sure that the -tmp_dir config option is respected,
-# TEST again as relative and absolute pathnames.
-# TEST 3) Make sure that if -both- -tmp_dir and a file are present,
-# TEST only the file is respected (see doc/env/naming.html).
-proc env003 { } {
- # env003 is essentially just a small driver that runs
- # env003_body twice. First, it supplies a "home" argument
- # to use with environment opens, and the second time it sets
- # DB_HOME instead.
- # Note that env003_body itself calls env003_run_test to run
- # the body of the actual test.
-
- global env
- source ./include.tcl
-
- puts "Env003: DB_TMP_DIR test."
-
- puts "\tEnv003: Running with -home argument to berkdb_env."
- env003_body "-home $testdir"
-
- puts "\tEnv003: Running with environment variable DB_HOME set."
- set env(DB_HOME) $testdir
- env003_body "-use_environ"
-
- unset env(DB_HOME)
-
- puts "\tEnv003: Running with both DB_HOME and -home set."
- # Should respect -only- -home, so we give it a bogus
- # environment variable setting.
- set env(DB_HOME) $testdir/bogus_home
- env003_body "-use_environ -home $testdir"
- unset env(DB_HOME)
-}
-
-proc env003_body { home_arg } {
- source ./include.tcl
-
- env_cleanup $testdir
- set tmpdir "tmpfiles_in_here"
- file mkdir $testdir/$tmpdir
-
- # Set up full path to $tmpdir for when we test absolute paths.
- set curdir [pwd]
- cd $testdir/$tmpdir
- set fulltmpdir [pwd]
- cd $curdir
-
- # Create DB_CONFIG
- env003_make_config $tmpdir
-
- # Run the meat of the test.
- env003_run_test a 1 "relative path, config file" $home_arg \
- $testdir/$tmpdir
-
- env003_make_config $fulltmpdir
-
- # Run the test again
- env003_run_test a 2 "absolute path, config file" $home_arg \
- $fulltmpdir
-
- # Now we try without a config file, but instead with db_config
- # relative paths
- env003_run_test b 1 "relative path, db_config" "$home_arg \
- -tmp_dir $tmpdir -data_dir ." \
- $testdir/$tmpdir
-
- # absolute paths
- env003_run_test b 2 "absolute path, db_config" "$home_arg \
- -tmp_dir $fulltmpdir -data_dir ." \
- $fulltmpdir
-
-	# Now, set db_config -and- have a DB_CONFIG file, and make
-	# sure only the latter is honored.
-
- file mkdir $testdir/bogus
- env003_make_config $tmpdir
-
- env003_run_test c 1 "relative path, both db_config and file" \
- "$home_arg -tmp_dir $testdir/bogus -data_dir ." \
- $testdir/$tmpdir
-
- file mkdir $fulltmpdir/bogus
- env003_make_config $fulltmpdir
-
- env003_run_test c 2 "absolute path, both db_config and file" \
- "$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
- $fulltmpdir
-}
-
-proc env003_run_test { major minor msg env_args tmp_path} {
- global testdir
- global alphabet
- global errorCode
-
- puts "\t\tEnv003.$major.$minor: $msg"
-
- # Create an environment and small-cached in-memory database to
- # use.
- set dbenv [eval {berkdb_env -create -home $testdir} $env_args \
- {-cachesize {0 50000 1}}]
- error_check_good env_open [is_valid_env $dbenv] TRUE
-
- set db [berkdb_open -env $dbenv -create -btree]
- error_check_good db_open [is_valid_db $db] TRUE
-
- # Fill the database with more than its cache can fit.
- #
- # When CONFIG_TEST is defined, the tempfile is left linked so
- # we can check for its existence. Size the data to overfill
- # the cache--the temp file is created lazily, so it is created
- # when the cache overflows.
- #
- set key "key"
- set data [repeat $alphabet 2000]
- error_check_good db_put [$db put $key $data] 0
-
- # Check for exactly one temp file.
- set ret [glob -nocomplain $tmp_path/BDB*]
- error_check_good temp_file_exists [llength $ret] 1
-
- # Can't remove temp file until db is closed on Windows.
- error_check_good db_close [$db close] 0
- fileremove -f $ret
- error_check_good env_close [$dbenv close] 0
-
-}
-
-proc env003_make_config { tmpdir } {
- global testdir
-
- set cid [open $testdir/DB_CONFIG w]
- puts $cid "set_data_dir ."
- puts $cid "set_tmp_dir $tmpdir"
- close $cid
-}
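
The overflow trick env003 depends on, with the arithmetic spelled out: the cache is capped at 50,000 bytes, and a single item built with the harness's repeat and alphabet helpers (as the test itself does) is 26 * 2000 = 52,000 bytes, so mpool must spill a lazily created temp file into the configured tmp_dir:

    set data [repeat $alphabet 2000]    ;# 52,000 bytes > 50,000-byte cache
    error_check_good db_put [$db put key $data] 0
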
diff --git a/storage/bdb/test/env004.tcl b/storage/bdb/test/env004.tcl
deleted file mode 100644
index fe975d700fd..00000000000
--- a/storage/bdb/test/env004.tcl
+++ /dev/null
@@ -1,95 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env004.tcl,v 11.22 2004/04/23 15:40:12 sue Exp $
-#
-# TEST env004
-# TEST Test multiple data directories. Do a bunch of different opens
-# TEST to make sure that the files are detected in different directories.
-proc env004 { } {
- source ./include.tcl
-
- set method "hash"
- set omethod [convert_method $method]
- set args [convert_args $method ""]
-
- puts "Env004: Multiple data directory test."
-
- env_cleanup $testdir
- file mkdir $testdir/data1
- file mkdir $testdir/data2
- file mkdir $testdir/data3
-
- puts "\tEnv004.a: Multiple data directories in DB_CONFIG file"
-
- # Create a config file
- set cid [open $testdir/DB_CONFIG w]
- puts $cid "set_data_dir ."
- puts $cid "set_data_dir data1"
- puts $cid "set_data_dir data2"
- puts $cid "set_data_dir data3"
- close $cid
-
- set e [berkdb_env -create -private -home $testdir]
- error_check_good dbenv [is_valid_env $e] TRUE
- ddir_test $method $e $args
- error_check_good env_close [$e close] 0
-
- puts "\tEnv004.b: Multiple data directories in berkdb_env call."
- env_cleanup $testdir
- file mkdir $testdir/data1
- file mkdir $testdir/data2
- file mkdir $testdir/data3
-
- # Now call dbenv with config specified
- set e [berkdb_env -create -private \
- -data_dir . -data_dir data1 -data_dir data2 \
- -data_dir data3 -home $testdir]
- error_check_good dbenv [is_valid_env $e] TRUE
- ddir_test $method $e $args
- error_check_good env_close [$e close] 0
-}
-
-proc ddir_test { method e args } {
- source ./include.tcl
-
- set args [convert_args $args]
- set omethod [convert_method $method]
-
- # Now create one file in each directory
- set db1 [eval {berkdb_open -create \
- -truncate -mode 0644 $omethod -env $e} $args {data1/datafile1.db}]
- error_check_good dbopen1 [is_valid_db $db1] TRUE
-
- set db2 [eval {berkdb_open -create \
- -truncate -mode 0644 $omethod -env $e} $args {data2/datafile2.db}]
- error_check_good dbopen2 [is_valid_db $db2] TRUE
-
- set db3 [eval {berkdb_open -create \
- -truncate -mode 0644 $omethod -env $e} $args {data3/datafile3.db}]
- error_check_good dbopen3 [is_valid_db $db3] TRUE
-
- # Close the files
- error_check_good db_close1 [$db1 close] 0
- error_check_good db_close2 [$db2 close] 0
- error_check_good db_close3 [$db3 close] 0
-
- # Now, reopen the files without complete pathnames and make
- # sure that we find them.
-
- set db1 [berkdb_open -env $e datafile1.db]
- error_check_good dbopen1 [is_valid_db $db1] TRUE
-
- set db2 [berkdb_open -env $e datafile2.db]
- error_check_good dbopen2 [is_valid_db $db2] TRUE
-
- set db3 [berkdb_open -env $e datafile3.db]
- error_check_good dbopen3 [is_valid_db $db3] TRUE
-
- # Finally close all the files
- error_check_good db_close1 [$db1 close] 0
- error_check_good db_close2 [$db2 close] 0
- error_check_good db_close3 [$db3 close] 0
-}
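
The name-resolution behavior env004 verifies, reduced to its two essential opens -- create under a directory-qualified name, then reopen by the bare name and let the data_dir settings resolve it:

    set db [berkdb_open -create -mode 0644 -hash -env $e data1/datafile1.db]
    error_check_good db_close [$db close] 0
    set db [berkdb_open -env $e datafile1.db]
    error_check_good db_reopen [is_valid_db $db] TRUE
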
diff --git a/storage/bdb/test/env005.tcl b/storage/bdb/test/env005.tcl
deleted file mode 100644
index fc08bc97fa1..00000000000
--- a/storage/bdb/test/env005.tcl
+++ /dev/null
@@ -1,52 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env005.tcl,v 11.20 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST env005
-# TEST Test that using subsystems without initializing them correctly
-# TEST returns an error. Cannot test mpool, because it is assumed in
-# TEST the Tcl code.
-proc env005 { } {
- source ./include.tcl
-
- puts "Env005: Uninitialized env subsystems test."
-
- env_cleanup $testdir
- puts "\tEnv005.a: Creating env with no subsystems."
- set e [berkdb_env_noerr -create -home $testdir]
- error_check_good dbenv [is_valid_env $e] TRUE
- set db [berkdb_open -create -btree $testdir/env005.db]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set rlist {
- { "lock_detect" "Env005.b0"}
- { "lock_get read 1 1" "Env005.b1"}
- { "lock_id" "Env005.b2"}
- { "lock_stat" "Env005.b3"}
- { "lock_timeout 100" "Env005.b4"}
- { "log_archive" "Env005.c0"}
- { "log_cursor" "Env005.c1"}
- { "log_file {1 1}" "Env005.c2"}
- { "log_flush" "Env005.c3"}
- { "log_put record" "Env005.c4"}
- { "log_stat" "Env005.c5"}
- { "txn" "Env005.d0"}
- { "txn_checkpoint" "Env005.d1"}
- { "txn_stat" "Env005.d2"}
- { "txn_timeout 100" "Env005.d3"}
- }
-
- foreach pair $rlist {
- set cmd [lindex $pair 0]
- set msg [lindex $pair 1]
- puts "\t$msg: $cmd"
- set stat [catch {eval $e $cmd} ret]
- error_check_good $cmd $stat 1
- error_check_good $cmd.err [is_substr $ret invalid] 1
- }
- error_check_good dbclose [$db close] 0
- error_check_good envclose [$e close] 0
-}
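
What env005 expects from each uninitialized subsystem, shown for a single command (assuming $e is the bare environment the test creates): the Tcl call raises, so catch returns 1, and the message contains "invalid".

    set stat [catch {$e lock_id} ret]
    error_check_good lock_id $stat 1
    error_check_good lock_id.err [is_substr $ret invalid] 1
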
diff --git a/storage/bdb/test/env006.tcl b/storage/bdb/test/env006.tcl
deleted file mode 100644
index 9f220335749..00000000000
--- a/storage/bdb/test/env006.tcl
+++ /dev/null
@@ -1,91 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env006.tcl,v 11.11 2004/04/27 19:56:44 carol Exp $
-#
-# TEST env006
-# TEST Make sure that all the utilities exist and run.
-# TEST Test that db_load -r options don't blow up.
-proc env006 { } {
- source ./include.tcl
-
- puts "Env006: Run underlying utilities."
-
- set rlist {
- { "db_archive" "Env006.a"}
- { "db_checkpoint" "Env006.b"}
- { "db_deadlock" "Env006.c"}
- { "db_dump" "Env006.d"}
- { "db_load" "Env006.e"}
- { "db_printlog" "Env006.f"}
- { "db_recover" "Env006.g"}
- { "db_stat" "Env006.h"}
- { "db_upgrade" "Env006.h"}
- { "db_verify" "Env006.h"}
- }
- foreach pair $rlist {
- set cmd [lindex $pair 0]
- set msg [lindex $pair 1]
-
- puts "\t$msg: $cmd"
-
- set stat [catch {exec $util_path/$cmd -?} ret]
- error_check_good $cmd $stat 1
-
- #
- # Check for "usage", but only check "sage" so that
- # we can handle either Usage or usage.
- #
- error_check_good $cmd.err [is_substr $ret sage] 1
- }
-
- env_cleanup $testdir
- set env [eval berkdb_env -create -home $testdir -txn]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set sub SUBDB
- foreach case { noenv env } {
- if { $case == "env" } {
- set envargs " -env $env "
- set homeargs " -h $testdir "
- set testfile env006.db
- } else {
- set envargs ""
- set homeargs ""
- set testfile $testdir/env006.db
- }
-
- puts "\tEnv006.i: Testing db_load -r with $case."
- set db [eval berkdb_open -create $envargs -btree $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- set ret [eval \
- exec $util_path/db_load -r lsn $homeargs $testfile]
- error_check_good db_load_r_lsn $ret ""
- set ret [eval \
- exec $util_path/db_load -r fileid $homeargs $testfile]
- error_check_good db_load_r_fileid $ret ""
-
- error_check_good db_remove \
- [eval {berkdb dbremove} $envargs $testfile] 0
-
- puts "\tEnv006.j: Testing db_load -r with $case and subdbs."
- set db [eval berkdb_open -create $envargs -btree $testfile $sub]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- set ret [eval \
- exec {$util_path/db_load} -r lsn $homeargs $testfile]
- error_check_good db_load_r_lsn $ret ""
- set ret [eval \
- exec {$util_path/db_load} -r fileid $homeargs $testfile]
- error_check_good db_load_r_fileid $ret ""
-
- error_check_good \
- db_remove [eval {berkdb dbremove} $envargs $testfile] 0
- }
- error_check_good env_close [$env close] 0
-}
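
The reset options env006 drives through db_load, runnable by hand against a standalone database file; both variants print nothing on success:

    set ret [exec $util_path/db_load -r lsn -h $testdir env006.db]
    error_check_good db_load_r_lsn $ret ""
    set ret [exec $util_path/db_load -r fileid -h $testdir env006.db]
    error_check_good db_load_r_fileid $ret ""
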
diff --git a/storage/bdb/test/env007.tcl b/storage/bdb/test/env007.tcl
deleted file mode 100644
index efbb17d55c7..00000000000
--- a/storage/bdb/test/env007.tcl
+++ /dev/null
@@ -1,605 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env007.tcl,v 11.41 2004/09/22 18:01:04 bostic Exp $
-#
-# TEST env007
-# TEST Test DB_CONFIG config file options for berkdb env.
-# TEST 1) Make sure command line option is respected
-# TEST 2) Make sure that config file option is respected
-# TEST 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
-# TEST method is used, only the file is respected.
-# TEST Then test all known config options.
-# TEST Also test config options on berkdb open. This isn't
-# TEST really env testing, but there's no better place to put it.
-proc env007 { } {
- global errorInfo
- global passwd
- global has_crypto
- source ./include.tcl
-
- puts "Env007: DB_CONFIG and getters test."
- puts "Env007.a: Test berkdb env options using getters and stat."
-
- # Set up options we can check via stat or getters. Structure
- # of the list is:
- # 0. Arg used in berkdb env command
- # 1. Arg used in DB_CONFIG file
- # 2. Value assigned in berkdb env command
- # 3. Value assigned in DB_CONFIG file
- # 4. Message output during test
- # 5. Stat command to run (empty if we can't get the info
- # from stat).
- # 6. String to search for in stat output
- # 7. Which arg to check in stat (needed for cases where
-	#	   we set more than one arg at a time, but stat can
-	#	   only check one, like cachesize)
- # 8. Arg used in getter
- #
- set rlist {
- { " -txn_max " "set_tx_max" "19" "31"
- "Env007.a1: Txn Max" "txn_stat"
- "Max Txns" "0" "get_tx_max" }
- { " -lock_max_locks " "set_lk_max_locks" "17" "29"
- "Env007.a2: Lock Max" "lock_stat"
- "Maximum locks" "0" "get_lk_max_locks" }
- { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
- "Env007.a3: Max Lockers" "lock_stat"
- "Maximum lockers" "0" "get_lk_max_lockers" }
- { " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
- "Env007.a4: Max Objects" "lock_stat"
- "Maximum objects" "0" "get_lk_max_objects" }
- { " -log_buffer " "set_lg_bsize" "65536" "131072"
- "Env007.a5: Log Bsize" "log_stat"
- "Log record cache size" "0" "get_lg_bsize" }
- { " -log_max " "set_lg_max" "8388608" "9437184"
- "Env007.a6: Log Max" "log_stat"
- "Current log file size" "0" "get_lg_max" }
- { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
- "Env007.a7.0: Cachesize" "mpool_stat"
- "Cache size (gbytes)" "0" "get_cachesize" }
- { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
- "Env007.a7.1: Cachesize" "mpool_stat"
- "Cache size (bytes)" "1" "get_cachesize" }
- { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
- "Env007.a7.2: Cachesize" "mpool_stat"
- "Number of caches" "2" "get_cachesize" }
- { " -lock_timeout " "set_lock_timeout" "100" "120"
- "Env007.a8: Lock Timeout" "lock_stat"
- "Lock timeout value" "0" "get_timeout lock" }
- { " -log_regionmax " "set_lg_regionmax" "8388608" "4194304"
- "Env007.a9: Log Regionmax" "log_stat"
- "Region size" "0" "get_lg_regionmax" }
- { " -mpool_max_openfd " "set_mp_max_openfd" "17" "27"
- "Env007.a10: Mmap max openfd" "mpool_stat"
- "Maximum open file descriptors" "0" "get_mp_max_openfd" }
- { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67"
- "Env007.a11.0: Mmap max write" "mpool_stat"
- "Maximum sequential buffer writes" "0" "get_mp_max_write" }
- { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67"
- "Env007.a11.1: Mmap max write" "mpool_stat"
- "Sleep after writing maximum buffers" "1" "get_mp_max_write" }
- { " -mpool_mmap_size " "set_mp_mmapsize" "12582912" "8388608"
- "Env007.a12: Mmapsize" "mpool_stat"
- "Maximum memory-mapped file size" "0" "get_mp_mmapsize" }
- { " -shm_key " "set_shm_key" "15" "35"
- "Env007.a13: Shm Key" ""
- "" "" "get_shm_key" }
- { " -tmp_dir " "set_tmp_dir" "." "./TEMPDIR"
- "Env007.a14: Temp dir" ""
- "" "" "get_tmp_dir" }
- { " -txn_timeout " "set_txn_timeout" "100" "120"
- "Env007.a15: Txn timeout" "lock_stat"
- "Transaction timeout value" "0" "get_timeout txn" }
- }
-
- set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn "
- set qnxexclude {set_cachesize}
- foreach item $rlist {
- set envarg [lindex $item 0]
- set configarg [lindex $item 1]
- set envval [lindex $item 2]
- set configval [lindex $item 3]
- set msg [lindex $item 4]
- set statcmd [lindex $item 5]
- set statstr [lindex $item 6]
- set index [lindex $item 7]
- set getter [lindex $item 8]
-
- if { $is_qnx_test &&
- [lsearch $qnxexclude $configarg] != -1 } {
- puts "\tEnv007.a: Skipping $configarg for QNX"
- continue
- }
-
- env_cleanup $testdir
-
- # First verify using just env args
- puts "\t$msg Environment argument only"
- set env [eval $e $envarg {$envval}]
- error_check_good envopen:0 [is_valid_env $env] TRUE
- error_check_good get_envval [eval $env $getter] $envval
- if { $statcmd != "" } {
- set statenvval [lindex $envval $index]
- # log_stat reports the sum of the specified
- # region size and the log buffer size.
- if { $statstr == "Region size" } {
- set lbufsize 32768
- set statenvval [expr $statenvval + $lbufsize]
- }
- env007_check $env $statcmd $statstr $statenvval
- }
- error_check_good envclose:0 [$env close] 0
-
- env_cleanup $testdir
- env007_make_config $configarg $configval
-
- # Verify using just config file
- puts "\t$msg Config file only"
- set env [eval $e]
- error_check_good envopen:1 [is_valid_env $env] TRUE
- error_check_good get_configval1 [eval $env $getter] $configval
- if { $statcmd != "" } {
- set statconfigval [lindex $configval $index]
- if { $statstr == "Region size" } {
- set statconfigval [expr $statconfigval + $lbufsize]
- }
- env007_check $env $statcmd $statstr $statconfigval
- }
- error_check_good envclose:1 [$env close] 0
-
- # Now verify using env args and config args
- puts "\t$msg Environment arg and config file"
- set env [eval $e $envarg {$envval}]
- error_check_good envopen:2 [is_valid_env $env] TRUE
- # Getter should retrieve config val, not envval.
- error_check_good get_configval2 [eval $env $getter] $configval
- if { $statcmd != "" } {
- env007_check $env $statcmd $statstr $statconfigval
- }
- error_check_good envclose:2 [$env close] 0
- }
-
- #
- # Test all options that can be set in DB_CONFIG. Write it out
-	# to the file and make sure we can open the env. This exercises
- # the config file code. Also check with a getter that the
- # expected value is returned.
- #
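-	# Each loop iteration below writes a one-line DB_CONFIG file;
-	# for example, the set_lg_bsize entry produces the line:
-	#	set_lg_bsize 65536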
- puts "\tEnv007.b: Test berkdb env config options using getters\
- and env open."
-
- # The cfglist variable contains options that can be set in DB_CONFIG.
- set cfglist {
- { "set_data_dir" "." "get_data_dirs" "." }
- { "set_flags" "db_auto_commit" "get_flags" "-auto_commit" }
- { "set_flags" "db_cdb_alldb" "get_flags" "-cdb_alldb" }
- { "set_flags" "db_direct_db" "get_flags" "-direct_db" }
- { "set_flags" "db_direct_log" "get_flags" "-direct_log" }
- { "set_flags" "db_dsync_log" "get_flags" "-dsync_log" }
- { "set_flags" "db_log_autoremove" "get_flags" "-log_remove" }
- { "set_flags" "db_nolocking" "get_flags" "-nolock" }
- { "set_flags" "db_nommap" "get_flags" "-nommap" }
- { "set_flags" "db_nopanic" "get_flags" "-nopanic" }
- { "set_flags" "db_overwrite" "get_flags" "-overwrite" }
- { "set_flags" "db_region_init" "get_flags" "-region_init" }
- { "set_flags" "db_txn_nosync" "get_flags" "-nosync" }
- { "set_flags" "db_txn_write_nosync" "get_flags" "-wrnosync" }
- { "set_flags" "db_yieldcpu" "get_flags" "-yield" }
- { "set_lg_bsize" "65536" "get_lg_bsize" "65536" }
- { "set_lg_dir" "." "get_lg_dir" "." }
- { "set_lg_max" "8388608" "get_lg_max" "8388608" }
- { "set_lg_regionmax" "65536" "get_lg_regionmax" "65536" }
- { "set_lk_detect" "db_lock_default" "get_lk_detect" "default" }
- { "set_lk_detect" "db_lock_expire" "get_lk_detect" "expire" }
- { "set_lk_detect" "db_lock_maxlocks" "get_lk_detect" "maxlocks" }
- { "set_lk_detect" "db_lock_minlocks" "get_lk_detect" "minlocks" }
- { "set_lk_detect" "db_lock_minwrite" "get_lk_detect" "minwrite" }
- { "set_lk_detect" "db_lock_oldest" "get_lk_detect" "oldest" }
- { "set_lk_detect" "db_lock_random" "get_lk_detect" "random" }
- { "set_lk_detect" "db_lock_youngest" "get_lk_detect" "youngest" }
- { "set_lk_max_lockers" "1500" "get_lk_max_lockers" "1500" }
- { "set_lk_max_locks" "29" "get_lk_max_locks" "29" }
- { "set_lk_max_objects" "1500" "get_lk_max_objects" "1500" }
- { "set_lock_timeout" "100" "get_timeout lock" "100" }
- { "set_mp_mmapsize" "12582912" "get_mp_mmapsize" "12582912" }
- { "set_mp_max_write" "10 20" "get_mp_max_write" "10 20" }
- { "set_mp_max_openfd" "10" "get_mp_max_openfd" "10" }
- { "set_region_init" "1" "get_flags" "-region_init" }
- { "set_shm_key" "15" "get_shm_key" "15" }
- { "set_tas_spins" "15" "get_tas_spins" "15" }
- { "set_tmp_dir" "." "get_tmp_dir" "." }
- { "set_tx_max" "31" "get_tx_max" "31" }
- { "set_txn_timeout" "50" "get_timeout txn" "50" }
- { "set_verbose" "db_verb_deadlock" "get_verbose deadlock" "on" }
- { "set_verbose" "db_verb_recovery" "get_verbose recovery" "on" }
- { "set_verbose" "db_verb_replication" "get_verbose rep" "on" }
- { "set_verbose" "db_verb_waitsfor" "get_verbose wait" "on" }
- }
-
- env_cleanup $testdir
- set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn"
- set directlist {db_direct_db db_direct_log}
- foreach item $cfglist {
- env_cleanup $testdir
- set configarg [lindex $item 0]
- set configval [lindex $item 1]
- set getter [lindex $item 2]
- set getval [lindex $item 3]
-
- env007_make_config $configarg $configval
-
- # Verify using config file
- puts "\t\tEnv007.b: $configarg $configval"
-
- # Unconfigured/unsupported direct I/O is not reported
- # as a failure.
- set directmsg \
- "direct I/O either not configured or not supported"
- if {[catch { eval $e } env ]} {
- if { [lsearch $directlist $configval] != -1 && \
- [is_substr $env $directmsg] == 1 } {
- continue
- } else {
- puts "FAIL: $env"
- continue
- }
- }
- error_check_good envvalid:1 [is_valid_env $env] TRUE
- error_check_good getter:1 [eval $env $getter] $getval
- error_check_good envclose:1 [$env close] 0
- }
-
- puts "\tEnv007.c: Test berkdb env options using getters and env open."
- # The envopenlist variable contains options that can be set using
- # berkdb env. We always set -mpool.
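-	# Structure of each entry is:
-	# 0. Flag used in berkdb env command
-	# 1. Value specified to flag
-	# 2. Expected string in the getter's output
-	# 3. Getter command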
- set envopenlist {
- { "-cdb" "" "-cdb" "get_open_flags" }
- { "-errpfx" "FOO" "FOO" "get_errpfx" }
- { "-lock" "" "-lock" "get_open_flags" }
- { "-log" "" "-log" "get_open_flags" }
- { "" "" "-mpool" "get_open_flags" }
- { "-system_mem" "-shm_key 1" "-system_mem" "get_open_flags" }
- { "-txn" "" "-txn" "get_open_flags" }
- { "-recover" "-txn" "-recover" "get_open_flags" }
- { "-recover_fatal" "-txn" "-recover_fatal" "get_open_flags" }
- { "-use_environ" "" "-use_environ" "get_open_flags" }
- { "-use_environ_root" "" "-use_environ_root" "get_open_flags" }
- { "" "" "-create" "get_open_flags" }
- { "-private" "" "-private" "get_open_flags" }
- { "-thread" "" "-thread" "get_open_flags" }
- { "-txn_timestamp" "100000000" "100000000" "get_tx_timestamp" }
- }
-
- if { $has_crypto == 1 } {
- lappend envopenlist \
- { "-encryptaes" "$passwd" "-encryptaes" "get_encrypt_flags" }
- }
-
- set e "berkdb_env -create -mode 0644 -home $testdir"
- set qnxexclude {-system_mem}
- foreach item $envopenlist {
- set envarg [lindex $item 0]
- set envval [lindex $item 1]
- set retval [lindex $item 2]
- set getter [lindex $item 3]
-
- if { $is_qnx_test &&
- [lsearch $qnxexclude $envarg] != -1} {
- puts "\t\tEnv007: Skipping $envarg for QNX"
- continue
- }
- env_cleanup $testdir
- # Set up env
- set env [eval $e $envarg $envval]
- error_check_good envopen [is_valid_env $env] TRUE
-
- # Check that getter retrieves expected retval.
- set get_retval [eval $env $getter]
- if { [is_substr $get_retval $retval] != 1 } {
- puts "FAIL: $retval\
- should be a substring of $get_retval"
- continue
- }
- error_check_good envclose [$env close] 0
-
- # The -encryptany flag can only be tested on an existing
- # environment that supports encryption, so do it here.
- if { $has_crypto == 1 } {
- if { $envarg == "-encryptaes" } {
- set env [eval berkdb_env -home $testdir\
- -encryptany $passwd]
- error_check_good get_encryptany \
- [eval $env get_encrypt_flags] "-encryptaes"
- error_check_good env_close [$env close] 0
- }
- }
- }
-
- puts "\tEnv007.d: Test berkdb env options using set_flags and getters."
-
- # The flaglist variable contains options that can be set using
- # $env set_flags.
- set flaglist {
- { "-direct_db" }
- { "-direct_log" }
- { "-dsync_log" }
- { "-log_remove" }
- { "-nolock" }
- { "-nommap" }
- { "-nopanic" }
- { "-nosync" }
- { "-overwrite" }
- { "-panic" }
- { "-wrnosync" }
- }
- set e "berkdb_env_noerr -create -mode 0644 -home $testdir"
- set directlist {-direct_db -direct_log}
- foreach item $flaglist {
- set flag [lindex $item 0]
- env_cleanup $testdir
-
- # Set up env
- set env [eval $e]
- error_check_good envopen [is_valid_env $env] TRUE
-
- # Use set_flags to turn on new env characteristics.
- #
- # Unconfigured/unsupported direct I/O is not reported
- # as a failure.
- if {[catch { $env set_flags $flag on } res ]} {
- if { [lsearch $directlist $flag] != -1 && \
- [is_substr $res $directmsg] == 1 } {
- error_check_good env_close [$env close] 0
- continue
- } else {
- puts "FAIL: $res"
- error_check_good env_close [$env close] 0
- continue
- }
- } else {
- error_check_good "flag $flag on" $res 0
- }
-
- # Check that getter retrieves expected retval.
- set get_retval [eval $env get_flags]
- if { [is_substr $get_retval $flag] != 1 } {
- puts "FAIL: $flag should be a substring of $get_retval"
- error_check_good env_close [$env close] 0
- continue
- }
- # Use set_flags to turn off env characteristics, make sure
- # they are gone.
- error_check_good "flag $flag off" [$env set_flags $flag off] 0
- set get_retval [eval $env get_flags]
- if { [is_substr $get_retval $flag] == 1 } {
- puts "FAIL: $flag should not be in $get_retval"
- error_check_good env_close [$env close] 0
- continue
- }
-
- error_check_good envclose [$env close] 0
- }
-
- puts "\tEnv007.e: Test env get_home."
- env_cleanup $testdir
- # Set up env
- set env [eval $e]
- error_check_good env_open [is_valid_env $env] TRUE
- # Test for correct value.
- set get_retval [eval $env get_home]
- error_check_good get_home $get_retval $testdir
- error_check_good envclose [$env close] 0
-
- puts "\tEnv007.f: Test that bad config values are rejected."
- set cfglist {
- { "set_cachesize" "1048576" }
- { "set_flags" "db_xxx" }
- { "set_flags" "1" }
- { "set_flags" "db_txn_nosync x" }
- { "set_lg_bsize" "db_xxx" }
- { "set_lg_max" "db_xxx" }
- { "set_lg_regionmax" "db_xxx" }
- { "set_lk_detect" "db_xxx" }
- { "set_lk_detect" "1" }
- { "set_lk_detect" "db_lock_youngest x" }
- { "set_lk_max" "db_xxx" }
- { "set_lk_max_locks" "db_xxx" }
- { "set_lk_max_lockers" "db_xxx" }
- { "set_lk_max_objects" "db_xxx" }
- { "set_mp_max_openfd" "1 2" }
- { "set_mp_max_write" "1 2 3" }
- { "set_mp_mmapsize" "db_xxx" }
- { "set_region_init" "db_xxx" }
- { "set_shm_key" "db_xxx" }
- { "set_tas_spins" "db_xxx" }
- { "set_tx_max" "db_xxx" }
- { "set_verbose" "db_xxx" }
- { "set_verbose" "1" }
- { "set_verbose" "db_verb_recovery x" }
- }
-
- set e "berkdb_env_noerr -create -mode 0644 \
- -home $testdir -log -lock -txn "
- foreach item $cfglist {
- set configarg [lindex $item 0]
- set configval [lindex $item 1]
-
- env007_make_config $configarg $configval
-
- # verify using just config file
- set stat [catch {eval $e} ret]
- error_check_good envopen $stat 1
- error_check_good error [is_substr $errorInfo \
- "incorrect arguments for name-value pair"] 1
- }
-
- puts "\tEnv007.g: Config name error set_xxx"
- set e "berkdb_env_noerr -create -mode 0644 \
- -home $testdir -log -lock -txn "
- env007_make_config "set_xxx" 1
- set stat [catch {eval $e} ret]
- error_check_good envopen $stat 1
- error_check_good error [is_substr $errorInfo \
- "unrecognized name-value pair"] 1
-
- puts "\tEnv007.h: Test berkdb open flags and getters."
- # Check options that we configure with berkdb open and
- # query via getters. Structure of the list is:
- # 0. Flag used in berkdb open command
- # 1. Value specified to flag
- # 2. Specific method, if needed
- # 3. Arg used in getter
-
- set olist {
- { "-minkey" "4" " -btree " "get_bt_minkey" }
- { "-cachesize" "0 1048576 1" "" "get_cachesize" }
- { "" "FILENAME DBNAME" "" "get_dbname" }
- { "" "" "" "get_env" }
- { "-errpfx" "ERROR:" "" "get_errpfx" }
- { "" "-chksum" "" "get_flags" }
- { "-delim" "58" "-recno" "get_re_delim" }
- { "" "-dup" "" "get_flags" }
- { "" "-dup -dupsort" "" "get_flags" }
- { "" "-recnum" "" "get_flags" }
- { "" "-revsplitoff" "" "get_flags" }
- { "" "-renumber" "-recno" "get_flags" }
- { "" "-snapshot" "-recno" "get_flags" }
- { "" "-create" "" "get_open_flags" }
- { "" "-create -dirty" "" "get_open_flags" }
- { "" "-create -excl" "" "get_open_flags" }
- { "" "-create -nommap" "" "get_open_flags" }
- { "" "-create -thread" "" "get_open_flags" }
- { "" "-create -truncate" "" "get_open_flags" }
- { "-ffactor" "40" " -hash " "get_h_ffactor" }
- { "-lorder" "4321" "" "get_lorder" }
- { "-nelem" "10000" " -hash " "get_h_nelem" }
- { "-pagesize" "4096" "" "get_pagesize" }
- { "-extent" "4" "-queue" "get_q_extentsize" }
- { "-len" "20" "-recno" "get_re_len" }
- { "-pad" "0" "-recno" "get_re_pad" }
- { "-source" "include.tcl" "-recno" "get_re_source" }
- }
-
- set o "berkdb_open -create -mode 0644"
- foreach item $olist {
- cleanup $testdir NULL
- set flag [lindex $item 0]
- set flagval [lindex $item 1]
- set method [lindex $item 2]
- if { $method == "" } {
- set method " -btree "
- }
- set getter [lindex $item 3]
-
- # Check that open is successful with the flag.
- # The option -cachesize requires grouping for $flagval.
- if { $flag == "-cachesize" } {
- set db [eval $o $method $flag {$flagval}\
- $testdir/a.db]
- } else {
- set db [eval $o $method $flag $flagval\
- $testdir/a.db]
- }
- error_check_good dbopen:0 [is_valid_db $db] TRUE
-
- # Check that getter retrieves the correct value.
-		# Cachesizes under 500MB are adjusted upward by
-		# about 25%, so just make sure we're in the right
-		# ballpark: between 1.2 and 1.3 times the original value.
- if { $flag == "-cachesize" } {
- set retval [eval $db $getter]
- set retbytes [lindex $retval 1]
- set setbytes [lindex $flagval 1]
- error_check_good cachesize_low\
- [expr $retbytes > [expr $setbytes * 6 / 5]] 1
- error_check_good cachesize_high\
- [expr $retbytes < [expr $setbytes * 13 / 10]] 1
- } else {
- error_check_good get_flagval \
- [eval $db $getter] $flagval
- }
- error_check_good dbclose:0 [$db close] 0
- }
-
- puts "\tEnv007.i: Test berkdb_open -rdonly."
- # This test is done separately because -rdonly can only be specified
- # on an already existing database.
- set flag "-rdonly"
- set db [eval berkdb_open $flag $testdir/a.db]
- error_check_good open_rdonly [is_valid_db $db] TRUE
-
- error_check_good get_rdonly [eval $db get_open_flags] $flag
- error_check_good dbclose:0 [$db close] 0
-
- puts "\tEnv007.j: Test berkdb open flags and getters\
- requiring environments."
- # Check options that we configure with berkdb open and
- # query via getters. Structure of the list is:
- # 0. Flag used in berkdb open command
- # 1. Value specified to flag
- # 2. Specific method, if needed
- # 3. Arg used in getter
- # 4. Additional flags needed in setting up env
-
- set elist {
- { "" "-auto_commit" "" "get_open_flags" "" }
- }
-
- if { $has_crypto == 1 } {
- lappend elist \
- { "" "-encrypt" "" "get_flags" "-encryptaes $passwd" }
- }
-
- set e "berkdb_env -create -home $testdir -txn "
- set o "berkdb_open -create -btree -mode 0644 "
- foreach item $elist {
- env_cleanup $testdir
- set flag [lindex $item 0]
- set flagval [lindex $item 1]
- set method [lindex $item 2]
- if { $method == "" } {
- set method " -btree "
- }
- set getter [lindex $item 3]
- set envflag [lindex $item 4]
-
- # Check that open is successful with the flag.
- set env [eval $e $envflag]
- set db [eval $o -env $env $flag $flagval a.db]
- error_check_good dbopen:0 [is_valid_db $db] TRUE
-
- # Check that getter retrieves the correct value
- set get_flagval [eval $db $getter]
- error_check_good get_flagval [is_substr $get_flagval $flagval] 1
- error_check_good dbclose [$db close] 0
- error_check_good envclose [$env close] 0
- }
-}
-
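-# Scan the output of $statcmd for the stat line whose label contains
-# $statstr, and verify that its value equals $testval.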
-proc env007_check { env statcmd statstr testval } {
- set stat [$env $statcmd]
- set checked 0
- foreach statpair $stat {
- if {$checked == 1} {
- break
- }
- set statmsg [lindex $statpair 0]
- set statval [lindex $statpair 1]
- if {[is_substr $statmsg $statstr] != 0} {
- set checked 1
- error_check_good $statstr:ck $statval $testval
- }
- }
- error_check_good $statstr:test $checked 1
-}
-
-proc env007_make_config { carg cval } {
- global testdir
-
- set cid [open $testdir/DB_CONFIG w]
- puts $cid "$carg $cval"
- close $cid
-}
diff --git a/storage/bdb/test/env008.tcl b/storage/bdb/test/env008.tcl
deleted file mode 100644
index c203d55d4dd..00000000000
--- a/storage/bdb/test/env008.tcl
+++ /dev/null
@@ -1,73 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env008.tcl,v 11.8 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST env008
-# TEST Test environments and subdirectories.
-proc env008 { } {
- global errorInfo
- global errorCode
-
- source ./include.tcl
-
- env_cleanup $testdir
-
- set subdir 1/1
- set subdir1 1/2
- file mkdir $testdir/$subdir $testdir/$subdir1
- set testfile $subdir/env.db
-
- puts "Env008: Test of environments and subdirectories."
-
- puts "\tEnv008.a: Create env and db."
- set env [berkdb_env -create -mode 0644 -home $testdir -txn]
- error_check_good env [is_valid_env $env] TRUE
-
- puts "\tEnv008.b: Remove db in subdir."
- env008_db $env $testfile
- error_check_good dbremove:$testfile \
- [berkdb dbremove -env $env $testfile] 0
-
- #
-	# Rather than remaking the db for each rename, just keep
-	# renaming the same file to another new file name.
- #
- puts "\tEnv008.c: Rename db in subdir."
- env008_db $env $testfile
- set newfile $subdir/new.db
- error_check_good dbrename:$testfile/.. \
- [berkdb dbrename -env $env $testfile $newfile] 0
- set testfile $newfile
-
- puts "\tEnv008.d: Rename db to parent dir."
- set newfile $subdir/../new.db
- error_check_good dbrename:$testfile/.. \
- [berkdb dbrename -env $env $testfile $newfile] 0
- set testfile $newfile
-
- puts "\tEnv008.e: Rename db to child dir."
- set newfile $subdir/env.db
- error_check_good dbrename:$testfile/.. \
- [berkdb dbrename -env $env $testfile $newfile] 0
- set testfile $newfile
-
- puts "\tEnv008.f: Rename db to another dir."
- set newfile $subdir1/env.db
- error_check_good dbrename:$testfile/.. \
- [berkdb dbrename -env $env $testfile $newfile] 0
-
- error_check_good envclose [$env close] 0
- puts "\tEnv008 complete."
-}
-
-proc env008_db { env testfile } {
- set db [berkdb_open -env $env -create -btree $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set ret [$db put key data]
- error_check_good dbput $ret 0
- error_check_good dbclose [$db close] 0
-}
diff --git a/storage/bdb/test/env009.tcl b/storage/bdb/test/env009.tcl
deleted file mode 100644
index e6fd3a56373..00000000000
--- a/storage/bdb/test/env009.tcl
+++ /dev/null
@@ -1,82 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env009.tcl,v 11.9 2004/09/23 21:45:21 mjc Exp $
-#
-# TEST env009
-# TEST Test calls to all the various stat functions. We have several
-# TEST sprinkled throughout the test suite, but this will ensure that
-# TEST we run all of them at least once.
-proc env009 { } {
- source ./include.tcl
-
- puts "Env009: Various stat functions test."
-
- env_cleanup $testdir
- puts "\tEnv009.a: Setting up env and a database."
-
- set e [berkdb_env -create -home $testdir -txn]
- error_check_good dbenv [is_valid_env $e] TRUE
- set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
- error_check_good dbopen [is_valid_db $dbbt] TRUE
- set dbh [berkdb_open -create -hash $testdir/env009h.db]
- error_check_good dbopen [is_valid_db $dbh] TRUE
- set dbq [berkdb_open -create -queue $testdir/env009q.db]
- error_check_good dbopen [is_valid_db $dbq] TRUE
-
- puts "\tEnv009.b: Setting up replication master and client envs."
- replsetup $testdir/MSGQUEUEDIR
- set masterdir $testdir/MASTERDIR
- set clientdir $testdir/CLIENTDIR
- file mkdir $masterdir
- file mkdir $clientdir
-
- repladd 1
- set repenv(M) [berkdb_env -create -home $masterdir \
- -txn -rep_master -rep_transport [list 1 replsend]]
- repladd 2
- set repenv(C) [berkdb_env -create -home $clientdir \
- -txn -rep_client -rep_transport [list 2 replsend]]
-
- set rlist {
-	{ "lock_stat" "Maximum locks" "Env009.c" "$e" }
- { "log_stat" "Magic" "Env009.d" "$e" }
- { "mpool_stat" "Number of caches" "Env009.e" "$e"}
- { "txn_stat" "Max Txns" "Env009.f" "$e" }
- { "rep_stat" "{Environment ID} 1" "Env009.g (Master)" "$repenv(M)"}
- { "rep_stat" "{Environment ID} 2" "Env009.h (Client)" "$repenv(C)"}
- }
-
- foreach set $rlist {
- set cmd [lindex $set 0]
- set str [lindex $set 1]
- set msg [lindex $set 2]
- set env [lindex $set 3]
- puts "\t$msg: $cmd"
- set ret [eval $env $cmd]
- error_check_good $cmd [is_substr $ret $str] 1
- }
-
- puts "\tEnv009.i: btree stats"
- set ret [$dbbt stat]
-	error_check_good btree_stat [is_substr $ret "Leaf pages"] 1
-
- puts "\tEnv009.j: hash stats"
- set ret [$dbh stat]
-	error_check_good hash_stat [is_substr $ret "Buckets"] 1
-
- puts "\tEnv009.k: queue stats"
- set ret [$dbq stat]
-	error_check_good queue_stat [is_substr $ret "Extent size"] 1
-
- # Clean up.
- error_check_good dbclose [$dbbt close] 0
- error_check_good dbclose [$dbh close] 0
- error_check_good dbclose [$dbq close] 0
- error_check_good masterenvclose [$repenv(M) close] 0
- error_check_good clientenvclose [$repenv(C) close] 0
- replclose $testdir/MSGQUEUEDIR
- error_check_good envclose [$e close] 0
-}
diff --git a/storage/bdb/test/env010.tcl b/storage/bdb/test/env010.tcl
deleted file mode 100644
index 403f0cd9dd6..00000000000
--- a/storage/bdb/test/env010.tcl
+++ /dev/null
@@ -1,50 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env010.tcl,v 1.7 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST env010
-# TEST Run recovery in an empty directory, and then make sure we can still
-# TEST create a database in that directory.
-proc env010 { } {
- source ./include.tcl
-
- puts "Env010: Test of recovery in an empty directory."
-
- # Create a new directory used only for this test
-
- if { [file exists $testdir/EMPTYDIR] != 1 } {
- file mkdir $testdir/EMPTYDIR
- } else {
- puts "\nDirectory already exists."
- }
-
-	# Do the test twice, for regular and catastrophic recovery.
-	# Open the environment and recover, but don't create a database.
-
- foreach rmethod {recover recover_fatal} {
-
- puts "\tEnv010: Creating env for $rmethod test."
- env_cleanup $testdir/EMPTYDIR
- set e [berkdb_env \
- -create -home $testdir/EMPTYDIR -txn -$rmethod]
- error_check_good dbenv [is_valid_env $e] TRUE
-
- # Open and close a database
- # The method doesn't matter, so picked btree arbitrarily
-
- set db [eval {berkdb_open -env $e \
- -btree -create -mode 0644} ]
- error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- # Close environment
-
- error_check_good envclose [$e close] 0
- error_check_good berkdb:envremove \
- [berkdb envremove -home $testdir/EMPTYDIR] 0
- }
- puts "\tEnv010 complete."
-}
diff --git a/storage/bdb/test/env011.tcl b/storage/bdb/test/env011.tcl
deleted file mode 100644
index de6c82b3670..00000000000
--- a/storage/bdb/test/env011.tcl
+++ /dev/null
@@ -1,39 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: env011.tcl,v 1.4 2004/01/28 03:36:27 bostic Exp $
-#
-# TEST env011
-# TEST Run with region overwrite flag.
-proc env011 { } {
- source ./include.tcl
-
- puts "Env011: Test of region overwriting."
- env_cleanup $testdir
-
- puts "\tEnv011: Creating/closing env for open test."
- set e [berkdb_env -create -overwrite -home $testdir -txn]
- error_check_good dbenv [is_valid_env $e] TRUE
- set db [eval \
- {berkdb_open -auto_commit -env $e -btree -create -mode 0644} ]
- error_check_good dbopen [is_valid_db $db] TRUE
- set ret [eval {$db put} -auto_commit "aaa" "data"]
- error_check_good put $ret 0
- set ret [eval {$db put} -auto_commit "bbb" "data"]
- error_check_good put $ret 0
- error_check_good db_close [$db close] 0
- error_check_good envclose [$e close] 0
-
- puts "\tEnv011: Opening the environment with overwrite set."
- set e [berkdb_env -create -overwrite -home $testdir -txn -recover]
- error_check_good dbenv [is_valid_env $e] TRUE
- error_check_good envclose [$e close] 0
-
- puts "\tEnv011: Removing the environment with overwrite set."
- error_check_good berkdb:envremove \
- [berkdb envremove -home $testdir -overwrite] 0
-
- puts "\tEnv011 complete."
-}
diff --git a/storage/bdb/test/hsearch.tcl b/storage/bdb/test/hsearch.tcl
deleted file mode 100644
index f2d223264d4..00000000000
--- a/storage/bdb/test/hsearch.tcl
+++ /dev/null
@@ -1,51 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: hsearch.tcl,v 11.11 2004/01/28 03:36:28 bostic Exp $
-#
-# Historic Hsearch interface test.
-# Use the first 1000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Then reopen the file, re-retrieve everything.
-# Finally, delete everything.
-proc hsearch { { nentries 1000 } } {
- source ./include.tcl
-
- puts "HSEARCH interfaces test: $nentries"
-
- # Create the database and open the dictionary
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir NULL
-
- error_check_good hcreate [berkdb hcreate $nentries] 0
- set did [open $dict]
- set count 0
-
- puts "\tHSEARCH.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- set ret [berkdb hsearch $str $str enter]
- error_check_good hsearch:enter $ret 0
-
- set d [berkdb hsearch $str 0 find]
- error_check_good hsearch:find $d $str
- incr count
- }
- close $did
-
- puts "\tHSEARCH.b: re-get loop"
-	set did [open $dict]
-	# Reset the count so the re-get loop actually runs.
-	set count 0
-	# Here is the loop where we retrieve each key
- while { [gets $did str] != -1 && $count < $nentries } {
- set d [berkdb hsearch $str 0 find]
- error_check_good hsearch:find $d $str
- incr count
- }
- close $did
- error_check_good hdestroy [berkdb hdestroy] 0
-}
diff --git a/storage/bdb/test/join.tcl b/storage/bdb/test/join.tcl
deleted file mode 100644
index eba811dfdc7..00000000000
--- a/storage/bdb/test/join.tcl
+++ /dev/null
@@ -1,455 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: join.tcl,v 11.23 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST jointest
-# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
-# TEST with differing index orders and selectivity.
-# TEST
-# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those
-# TEST work, everything else does as well. We'll create test databases
-# TEST called join1.db, join2.db, join3.db, and join4.db. The number on
-# TEST the database describes the duplication -- duplicates are of the
-# TEST form 0, N, 2N, 3N, ... where N is the number of the database.
-# TEST Primary.db is the primary database, and null.db is the database
-# TEST that has no matching duplicates.
-# TEST
-# TEST We should test this on all btree, all hash, and a combination thereof.
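-# TEST
-# TEST For example, join2.db carries duplicates 0, 2, 4, ... and join3.db
-# TEST carries 0, 3, 6, ..., so a join across the two should match only
-# TEST on multiples of 6.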
-proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
- global testdir
- global rand_init
- source ./include.tcl
-
- env_cleanup $testdir
- berkdb srand $rand_init
-
- # Use one environment for all database opens so we don't
- # need oodles of regions.
- set env [berkdb_env -create -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- # With the new offpage duplicate code, we don't support
- # duplicate duplicates in sorted dup sets. Thus, if with_dup_dups
- # is greater than one, run only with "-dup".
- if { $with_dup_dups > 1 } {
- set doptarray {"-dup"}
- } else {
- set doptarray {"-dup -dupsort" "-dup" RANDOMMIX RANDOMMIX }
- }
-
- # NB: these flags are internal only, ok
- foreach m "DB_BTREE DB_HASH DB_BOTH" {
- # run with two different random mixes.
- foreach dopt $doptarray {
- set opt [list "-env" $env $dopt]
-
- puts "Join test: ($m $dopt) psize $psize,\
- $with_dup_dups dup\
- dups, flags $flags."
-
- build_all $m $psize $opt oa $with_dup_dups
-
-			# null.db is built fifth (by db_build) but is
-			# referenced by index zero; set up the option
-			# array appropriately.
- set oa(0) $oa(5)
-
- # Build the primary
- puts "\tBuilding the primary database $m"
- set oflags "-create -truncate -mode 0644 -env $env\
- [conv $m [berkdb random_int 1 2]]"
- set db [eval {berkdb_open} $oflags primary.db]
- error_check_good dbopen [is_valid_db $db] TRUE
- for { set i 0 } { $i < 1000 } { incr i } {
- set key [format "%04d" $i]
- set ret [$db put $key stub]
- error_check_good "primary put" $ret 0
- }
- error_check_good "primary close" [$db close] 0
- set did [open $dict]
- gets $did str
- do_join primary.db "1 0" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "2 0" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "3 0" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "4 0" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "1" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "2" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "3" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "4" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "1 2" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "1 2 3" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "1 2 3 4" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "2 1" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "3 2 1" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "4 3 2 1" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "1 3" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "3 1" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "1 4" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "4 1" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "2 3" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "3 2" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "2 4" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "4 2" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "3 4" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "4 3" $str oa $flags $with_dup_dups
- gets $did str
- do_join primary.db "2 3 4" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "3 4 1" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "4 2 1" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "0 2 1" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "3 2 0" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "4 3 2 1" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "4 3 0 1" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "3 3 3" $str oa $flags\
- $with_dup_dups
- gets $did str
- do_join primary.db "2 2 3 3" $str oa $flags\
- $with_dup_dups
- gets $did str2
- gets $did str
- do_join primary.db "1 2" $str oa $flags\
- $with_dup_dups "3" $str2
-
- # You really don't want to run this section
- # with $with_dup_dups > 2.
- if { $with_dup_dups <= 2 } {
- gets $did str2
- gets $did str
- do_join primary.db "1 2 3" $str\
- oa $flags $with_dup_dups "3 3 1" $str2
- gets $did str2
- gets $did str
- do_join primary.db "4 0 2" $str\
- oa $flags $with_dup_dups "4 3 3" $str2
- gets $did str2
- gets $did str
- do_join primary.db "3 2 1" $str\
- oa $flags $with_dup_dups "0 2" $str2
- gets $did str2
- gets $did str
- do_join primary.db "2 2 3 3" $str\
- oa $flags $with_dup_dups "1 4 4" $str2
- gets $did str2
- gets $did str
- do_join primary.db "2 2 3 3" $str\
- oa $flags $with_dup_dups "0 0 4 4" $str2
- gets $did str2
- gets $did str
- do_join primary.db "2 2 3 3" $str2\
- oa $flags $with_dup_dups "2 4 4" $str
- gets $did str2
- gets $did str
- do_join primary.db "2 2 3 3" $str2\
- oa $flags $with_dup_dups "0 0 4 4" $str
- }
- close $did
- }
- }
-
- error_check_good env_close [$env close] 0
-}
-
-proc build_all { method psize opt oaname with_dup_dups {nentries 100} } {
- global testdir
- db_build join1.db $nentries 50 1 [conv $method 1]\
- $psize $opt $oaname $with_dup_dups
- db_build join2.db $nentries 25 2 [conv $method 2]\
- $psize $opt $oaname $with_dup_dups
- db_build join3.db $nentries 16 3 [conv $method 3]\
- $psize $opt $oaname $with_dup_dups
- db_build join4.db $nentries 12 4 [conv $method 4]\
- $psize $opt $oaname $with_dup_dups
- db_build null.db $nentries 0 5 [conv $method 5]\
- $psize $opt $oaname $with_dup_dups
-}
-
-proc conv { m i } {
- switch -- $m {
- DB_HASH { return "-hash"}
- "-hash" { return "-hash"}
- DB_BTREE { return "-btree"}
- "-btree" { return "-btree"}
- DB_BOTH {
- if { [expr $i % 2] == 0 } {
- return "-hash";
- } else {
- return "-btree";
- }
- }
- }
-}
-
-proc random_opts { } {
- set j [berkdb random_int 0 1]
- if { $j == 0 } {
- return " -dup"
- } else {
- return " -dup -dupsort"
- }
-}
-
-proc db_build { name nkeys ndups dup_interval method psize lopt oaname \
- with_dup_dups } {
- source ./include.tcl
-
- # Get array of arg names (from two levels up the call stack)
- upvar 2 $oaname oa
-
- # Search for "RANDOMMIX" in $opt, and if present, replace
- # with " -dup" or " -dup -dupsort" at random.
- set i [lsearch $lopt RANDOMMIX]
- if { $i != -1 } {
- set lopt [lreplace $lopt $i $i [random_opts]]
- }
-
- # Save off db_open arguments for this database.
- set opt [eval concat $lopt]
- set oa($dup_interval) $opt
-
- # Create the database and open the dictionary
- set oflags "-create -truncate -mode 0644 $method\
- -pagesize $psize"
- set db [eval {berkdb_open} $oflags $opt $name]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
- set count 0
- puts -nonewline "\tBuilding $name: $nkeys keys "
- puts -nonewline "with $ndups duplicates at interval of $dup_interval"
- if { $with_dup_dups > 0 } {
- puts ""
- puts "\t\tand $with_dup_dups duplicate duplicates."
- } else {
- puts "."
- }
- for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
- incr count} {
- set str $str$name
- # We need to make sure that the dups are inserted in a
- # random, or near random, order. Do this by generating
- # them and putting each in a list, then sorting the list
- # at random.
- set duplist {}
- for { set i 0 } { $i < $ndups } { incr i } {
- set data [format "%04d" [expr $i * $dup_interval]]
- lappend duplist $data
- }
-		# Mix the list by swapping adjacent entries (a truly
-		# random swap is left commented out below).
- for { set i 0 } { $i < $ndups } {incr i } {
- # set j [berkdb random_int $i [expr $ndups - 1]]
- set j [expr ($i % 2) + $i]
- if { $j >= $ndups } { set j $i }
- set dupi [lindex $duplist $i]
- set dupj [lindex $duplist $j]
- set duplist [lreplace $duplist $i $i $dupj]
- set duplist [lreplace $duplist $j $j $dupi]
- }
- foreach data $duplist {
- if { $with_dup_dups != 0 } {
- for { set j 0 }\
- { $j < $with_dup_dups }\
- {incr j} {
- set ret [$db put $str $data]
- error_check_good put$j $ret 0
- }
- } else {
- set ret [$db put $str $data]
- error_check_good put $ret 0
- }
- }
-
- if { $ndups == 0 } {
- set ret [$db put $str NODUP]
- error_check_good put $ret 0
- }
- }
- close $did
- error_check_good close:$name [$db close] 0
-}
-
-proc do_join { primary dbs key oanm flags with_dup_dups {dbs2 ""} {key2 ""} } {
- global testdir
- source ./include.tcl
-
- upvar $oanm oa
-
- puts -nonewline "\tJoining: $dbs on $key"
- if { $dbs2 == "" } {
- puts ""
- } else {
- puts " with $dbs2 on $key2"
- }
-
- # Open all the databases
- set p [berkdb_open -unknown $testdir/$primary]
- error_check_good "primary open" [is_valid_db $p] TRUE
-
- set dblist ""
- set curslist ""
-
- set ndx [llength $dbs]
-
- foreach i [concat $dbs $dbs2] {
- set opt $oa($i)
- set db [eval {berkdb_open -unknown} $opt [n_to_name $i]]
- error_check_good "[n_to_name $i] open" [is_valid_db $db] TRUE
- set curs [$db cursor]
- error_check_good "$db cursor" \
- [is_substr $curs "$db.c"] 1
- lappend dblist $db
- lappend curslist $curs
-
- if { $ndx > 0 } {
- set realkey [concat $key[n_to_name $i]]
- } else {
- set realkey [concat $key2[n_to_name $i]]
- }
-
- set pair [$curs get -set $realkey]
- error_check_good cursor_set:$realkey:$pair \
- [llength [lindex $pair 0]] 2
-
- incr ndx -1
- }
-
- set join_curs [eval {$p join} $curslist]
- error_check_good join_cursor \
- [is_substr $join_curs "$p.c"] 1
-
- # Calculate how many dups we expect.
- # We go through the list of indices. If we find a 0, then we
- # expect 0 dups. For everything else, we look at pairs of numbers,
-	# if they are relatively prime, multiply them and figure out how
- # many times that goes into 50. If they aren't relatively prime,
- # take the number of times the larger goes into 50.
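-	# For example, joining on "2 3": 2 and 3 are relatively prime,
-	# so matches fall on multiples of 6 and we expect 50 / 6 = 8 dups.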
- set expected 50
- set last 1
- foreach n [concat $dbs $dbs2] {
- if { $n == 0 } {
- set expected 0
- break
- }
- if { $last == $n } {
- continue
- }
-
- if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
- if { $n > $last } {
- set last $n
- set expected [expr 50 / $last]
- }
- } else {
- set last [expr $n * $last / [gcd $n $last]]
- set expected [expr 50 / $last]
- }
- }
-
- # If $with_dup_dups is greater than zero, each datum has
- # been inserted $with_dup_dups times. So we expect the number
- # of dups to go up by a factor of ($with_dup_dups)^(number of databases)
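-	# For example, with $with_dup_dups set to 2 and a 2-way join,
-	# the expected count computed above is multiplied by 2 * 2 = 4.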
-
- if { $with_dup_dups > 0 } {
- foreach n [concat $dbs $dbs2] {
- set expected [expr $expected * $with_dup_dups]
- }
- }
-
- set ndups 0
- if { $flags == " -join_item"} {
- set l 1
- } else {
- set flags ""
- set l 2
- }
- for { set pair [eval {$join_curs get} $flags] } { \
- [llength [lindex $pair 0]] == $l } {
- set pair [eval {$join_curs get} $flags] } {
- set k [lindex [lindex $pair 0] 0]
- foreach i $dbs {
- error_check_bad valid_dup:$i:$dbs $i 0
- set kval [string trimleft $k 0]
- if { [string length $kval] == 0 } {
- set kval 0
- }
- error_check_good valid_dup:$i:$dbs [expr $kval % $i] 0
- }
- incr ndups
- }
- error_check_good number_of_dups:$dbs $ndups $expected
-
- error_check_good close_primary [$p close] 0
- foreach i $curslist {
- error_check_good close_cursor:$i [$i close] 0
- }
- foreach i $dblist {
- error_check_good close_index:$i [$i close] 0
- }
-}
-
-proc n_to_name { n } {
-	global testdir
- if { $n == 0 } {
- return null.db;
- } else {
- return join$n.db;
- }
-}
-
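-# Greatest common divisor by brute-force trial division; adequate for
-# the small dup intervals (1 through 5) used in this test.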
-proc gcd { a b } {
- set g 1
-
- for { set i 2 } { $i <= $a } { incr i } {
- if { [expr $a % $i] == 0 && [expr $b % $i] == 0 } {
- set g $i
- }
- }
- return $g
-}
diff --git a/storage/bdb/test/lock001.tcl b/storage/bdb/test/lock001.tcl
deleted file mode 100644
index 48eb95515cb..00000000000
--- a/storage/bdb/test/lock001.tcl
+++ /dev/null
@@ -1,122 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: lock001.tcl,v 11.21 2004/01/28 03:36:28 bostic Exp $
-#
-
-# TEST lock001
-# TEST Make sure that the basic lock tests work. Do some simple gets
-# TEST and puts for a single locker.
-proc lock001 { {iterations 1000} {maxlocks 1000} } {
- source ./include.tcl
- global lock_curid
- global lock_maxid
-
- set save_curid $lock_curid
- set save_maxid $lock_maxid
-
- # Set defaults
- # Adjusted to make exact match of isqrt
- #set conflicts { 3 0 0 0 0 0 1 0 1 1}
- #set conflicts { 3 0 0 0 0 1 0 1 1}
-
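-	# The nine entries form a 3x3 conflict matrix covering the
-	# three lock modes (ng, read, write) exercised below.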
- set conflicts { 0 0 0 0 0 1 0 1 1}
- set nmodes [isqrt [llength $conflicts]]
-
- # Cleanup
- env_cleanup $testdir
-
- # Open the region we'll use for testing.
- set eflags "-create -lock -home $testdir -mode 0644 \
- -lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}"
- set env [eval {berkdb_env} $eflags]
- error_check_good env [is_valid_env $env] TRUE
- error_check_good lock_id_set \
- [$env lock_id_set $lock_curid $lock_maxid] 0
-
- puts "Lock001: test basic lock operations"
- set locker [$env lock_id]
- # Get and release each type of lock
- puts "\tLock001.a: get and release each type of lock"
- foreach m {ng write read} {
- set obj obj$m
- set lockp [$env lock_get $m $locker $obj]
- error_check_good lock_get:a [is_blocked $lockp] 0
- error_check_good lock_get:a [is_substr $lockp $env] 1
- set ret [ $lockp put ]
- error_check_good lock_put $ret 0
- }
-
- # Get a bunch of locks for the same locker; these should work
- set obj OBJECT
- puts "\tLock001.b: Get a bunch of locks for the same locker"
- foreach m {ng write read} {
- set lockp [$env lock_get $m $locker $obj ]
- lappend locklist $lockp
- error_check_good lock_get:b [is_blocked $lockp] 0
- error_check_good lock_get:b [is_substr $lockp $env] 1
- }
- release_list $locklist
-
- set locklist {}
- # Check that reference counted locks work
- puts "\tLock001.c: reference counted locks."
- for {set i 0} { $i < 10 } {incr i} {
- set lockp [$env lock_get -nowait write $locker $obj]
- error_check_good lock_get:c [is_blocked $lockp] 0
- error_check_good lock_get:c [is_substr $lockp $env] 1
- lappend locklist $lockp
- }
- release_list $locklist
-
- # Finally try some failing locks
- set locklist {}
- foreach i {ng write read} {
- set lockp [$env lock_get $i $locker $obj]
- lappend locklist $lockp
- error_check_good lock_get:d [is_blocked $lockp] 0
- error_check_good lock_get:d [is_substr $lockp $env] 1
- }
-
- # Change the locker
- set locker [$env lock_id]
- set blocklist {}
- # Skip NO_LOCK lock.
- puts "\tLock001.d: Change the locker, acquire read and write."
- foreach i {write read} {
- catch {$env lock_get -nowait $i $locker $obj} ret
- error_check_good lock_get:e [is_substr $ret "not granted"] 1
- #error_check_good lock_get:e [is_substr $lockp $env] 1
- #error_check_good lock_get:e [is_blocked $lockp] 0
- }
- # Now release original locks
- release_list $locklist
-
- # Now re-acquire blocking locks
- set locklist {}
- puts "\tLock001.e: Re-acquire blocking locks."
- foreach i {write read} {
- set lockp [$env lock_get -nowait $i $locker $obj ]
- error_check_good lock_get:f [is_substr $lockp $env] 1
- error_check_good lock_get:f [is_blocked $lockp] 0
- lappend locklist $lockp
- }
-
- # Now release new locks
- release_list $locklist
- error_check_good free_id [$env lock_id_free $locker] 0
-
- error_check_good envclose [$env close] 0
-
-}
-
-# Blocked locks appear as lockmgrN.lockM\nBLOCKED
-proc is_blocked { l } {
- if { [string compare $l BLOCKED ] == 0 } {
- return 1
- } else {
- return 0
- }
-}
diff --git a/storage/bdb/test/lock002.tcl b/storage/bdb/test/lock002.tcl
deleted file mode 100644
index 1fdea56cdb4..00000000000
--- a/storage/bdb/test/lock002.tcl
+++ /dev/null
@@ -1,157 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: lock002.tcl,v 11.21 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST lock002
-# TEST Exercise basic multi-process aspects of lock.
-proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
- source ./include.tcl
-
- puts "Lock002: Basic multi-process lock tests."
-
- env_cleanup $testdir
-
- set nmodes [isqrt [llength $conflicts]]
-
- # Open the lock
- mlock_open $maxlocks $nmodes $conflicts
- mlock_wait
-}
-
-# Make sure that we can create a region; destroy it, attach to it,
-# detach from it, etc.
-proc mlock_open { maxl nmodes conflicts } {
- source ./include.tcl
- global lock_curid
- global lock_maxid
-
- puts "\tLock002.a multi-process open/close test"
-
- # Open/Create region here. Then close it and try to open from
- # other test process.
- set env_cmd [concat "berkdb_env -create -mode 0644 \
- -lock -lock_max $maxl -lock_conflict" \
- [list [list $nmodes $conflicts]] "-home $testdir"]
- set local_env [eval $env_cmd]
- $local_env lock_id_set $lock_curid $lock_maxid
- error_check_good env_open [is_valid_env $local_env] TRUE
-
- set ret [$local_env close]
- error_check_good env_close $ret 0
-
- # Open from other test process
- set env_cmd "berkdb_env -mode 0644 -home $testdir"
-
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
-
- set remote_env [send_cmd $f1 $env_cmd]
- error_check_good remote:env_open [is_valid_env $remote_env] TRUE
-
- # Now make sure that we can reopen the region.
- set local_env [eval $env_cmd]
- error_check_good env_open [is_valid_env $local_env] TRUE
- set ret [$local_env close]
- error_check_good env_close $ret 0
-
- # Try closing the remote region
- set ret [send_cmd $f1 "$remote_env close"]
- error_check_good remote:lock_close $ret 0
-
-	# Try opening for create. Will succeed because the region exists.
- set env_cmd [concat "berkdb_env -create -mode 0644 \
- -lock -lock_max $maxl -lock_conflict" \
- [list [list $nmodes $conflicts]] "-home $testdir"]
- set local_env [eval $env_cmd]
-	error_check_good local:env_open [is_valid_env $local_env] TRUE
-
- # close locally
- reset_env $local_env
-
- # Close and exit remote
- set ret [send_cmd $f1 "reset_env $remote_env"]
-
- catch { close $f1 } result
-}
-
-proc mlock_wait { } {
- source ./include.tcl
-
- puts "\tLock002.b multi-process get/put wait test"
-
- # Open region locally
- set env_cmd "berkdb_env -lock -home $testdir"
- set local_env [eval $env_cmd]
- error_check_good env_open [is_valid_env $local_env] TRUE
-
- # Open region remotely
- set f1 [open |$tclsh_path r+]
-
- puts $f1 "source $test_path/test.tcl"
-
- set remote_env [send_cmd $f1 $env_cmd]
- error_check_good remote:env_open [is_valid_env $remote_env] TRUE
-
- # Get a write lock locally; try for the read lock
- # remotely. We hold the locks for several seconds
- # so that we can use timestamps to figure out if the
- # other process waited.
- set locker1 [$local_env lock_id]
- set local_lock [$local_env lock_get write $locker1 object1]
- error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
-
- # Now request a lock that we expect to hang; generate
- # timestamps so we can tell if it actually hangs.
- set locker2 [send_cmd $f1 "$remote_env lock_id"]
- set remote_lock [send_timed_cmd $f1 1 \
- "set lock \[$remote_env lock_get write $locker2 object1\]"]
-
- # Now sleep before releasing lock
- tclsleep 5
- set result [$local_lock put]
- error_check_good lock_put $result 0
-
- # Now get the result from the other script
- set result [rcv_result $f1]
- error_check_good lock_get:remote_time [expr $result > 4] 1
-
- # Now get the remote lock
- set remote_lock [send_cmd $f1 "puts \$lock"]
- error_check_good remote:lock_get \
- [is_valid_lock $remote_lock $remote_env] TRUE
-
-	# Now make the other guy wait 5 seconds and then release his
- # lock while we try to get a write lock on it
- set start [timestamp -r]
-
- set ret [send_cmd $f1 "tclsleep 5"]
-
- set ret [send_cmd $f1 "$remote_lock put"]
-
- set local_lock [$local_env lock_get write $locker1 object1]
- error_check_good lock_get:time \
- [expr [expr [timestamp -r] - $start] > 2] 1
- error_check_good lock_get:local \
- [is_valid_lock $local_lock $local_env] TRUE
-
- # Now check remote's result
- set result [rcv_result $f1]
- error_check_good lock_put:remote $result 0
-
- # Clean up remote
- set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ]
- error_check_good remote_free_id $result 0
- set ret [send_cmd $f1 "reset_env $remote_env"]
-
- close $f1
-
- # Now close up locally
- set ret [$local_lock put]
- error_check_good lock_put $ret 0
- error_check_good lock_id_free [$local_env lock_id_free $locker1] 0
-
- reset_env $local_env
-}
diff --git a/storage/bdb/test/lock003.tcl b/storage/bdb/test/lock003.tcl
deleted file mode 100644
index a535142838c..00000000000
--- a/storage/bdb/test/lock003.tcl
+++ /dev/null
@@ -1,101 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: lock003.tcl,v 11.28 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST lock003
-# TEST Exercise multi-process aspects of lock. Generate a bunch of parallel
-# TEST testers that try to randomly obtain locks; make sure that the locks
-# TEST correctly protect corresponding objects.
-proc lock003 { {iter 500} {max 1000} {procs 5} } {
- source ./include.tcl
- global lock_curid
- global lock_maxid
-
- set ldegree 5
- set objs 75
- set reads 65
- set wait 1
- set conflicts { 0 0 0 0 0 1 0 1 1}
- set seeds {}
-
- puts "Lock003: Multi-process random lock test"
-
- # Clean up after previous runs
- env_cleanup $testdir
-
- # Open/create the lock region
- puts "\tLock003.a: Create environment"
- set e [berkdb_env -create -lock -home $testdir]
- error_check_good env_open [is_substr $e env] 1
- $e lock_id_set $lock_curid $lock_maxid
-
- error_check_good env_close [$e close] 0
-
- # Now spawn off processes
- set pidlist {}
-
- for { set i 0 } {$i < $procs} {incr i} {
- if { [llength $seeds] == $procs } {
- set s [lindex $seeds $i]
- }
-# puts "$tclsh_path\
-# $test_path/wrap.tcl \
-# lockscript.tcl $testdir/$i.lockout\
-# $testdir $iter $objs $wait $ldegree $reads &"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- lockscript.tcl $testdir/lock003.$i.out \
- $testdir $iter $objs $wait $ldegree $reads &]
- lappend pidlist $p
- }
-
- puts "\tLock003.b: $procs independent processes now running"
- watch_procs $pidlist 30 10800
-
- # Check for test failure
- set errstrings [eval findfail [glob $testdir/lock003.*.out]]
- foreach str $errstrings {
- puts "FAIL: error message in .out file: $str"
- }
-
- # Remove log files
- for { set i 0 } {$i < $procs} {incr i} {
- fileremove -f $testdir/lock003.$i.out
- }
-}
-
-# Create and destroy flag files to show we have an object locked, and
-# verify that the correct files exist or don't exist given that we've
-# just read or write locked a file.
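-# For example, a read lock on object 7 held by process 1234 is flagged
-# by the file $testdir/L3FLAG.read.1234.7 until the lock is released.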
-proc lock003_create { rw obj } {
- source ./include.tcl
-
- set pref $testdir/L3FLAG
- set f [open $pref.$rw.[pid].$obj w]
- close $f
-}
-
-proc lock003_destroy { obj } {
- source ./include.tcl
-
- set pref $testdir/L3FLAG
- set f [glob -nocomplain $pref.*.[pid].$obj]
- error_check_good l3_destroy [llength $f] 1
- fileremove $f
-}
-
-proc lock003_vrfy { rw obj } {
- source ./include.tcl
-
- set pref $testdir/L3FLAG
- if { [string compare $rw "write"] == 0 } {
- set fs [glob -nocomplain $pref.*.*.$obj]
- error_check_good "number of other locks on $obj" [llength $fs] 0
- } else {
- set fs [glob -nocomplain $pref.write.*.$obj]
- error_check_good "number of write locks on $obj" [llength $fs] 0
- }
-}
-
diff --git a/storage/bdb/test/lock004.tcl b/storage/bdb/test/lock004.tcl
deleted file mode 100644
index e71a51f9ba7..00000000000
--- a/storage/bdb/test/lock004.tcl
+++ /dev/null
@@ -1,29 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: lock004.tcl,v 11.7 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST lock004
-# TEST Test locker ids wrapping around.
-
-proc lock004 {} {
- source ./include.tcl
- global lock_curid
- global lock_maxid
-
- set save_curid $lock_curid
- set save_maxid $lock_maxid
-
- set lock_curid [expr $lock_maxid - 1]
- puts "Lock004: Locker id wraparound test"
- puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids"
-
- lock001
- lock002
- lock003
-
- set lock_curid $save_curid
- set lock_maxid $save_maxid
-}
diff --git a/storage/bdb/test/lock005.tcl b/storage/bdb/test/lock005.tcl
deleted file mode 100644
index 8b3b977ad31..00000000000
--- a/storage/bdb/test/lock005.tcl
+++ /dev/null
@@ -1,177 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: lock005.tcl,v 1.10 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST lock005
-# TEST Check that page locks are being released properly.
-
-proc lock005 { } {
- source ./include.tcl
-
- puts "Lock005: Page lock release test"
-
- # Clean up after previous runs
- env_cleanup $testdir
-
- # Open/create the lock region
- set e [berkdb_env -create -lock -home $testdir -txn -log]
- error_check_good env_open [is_valid_env $e] TRUE
-
- # Open/create the database
- set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db]
- error_check_good dbopen [is_valid_db $db] TRUE
-
-	# Check that records are being locked by trying to
-	# fetch a record from the wrong transaction.
- puts "\tLock005.a: Verify that we are locking"
-
- # Start the first transaction
- set txn1 [$e txn -nowait]
- error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
- set ret [catch {$db put -txn $txn1 -append record1} recno1]
- error_check_good dbput_txn1 $ret 0
-
- # Start second txn while the first is still running ...
- set txn2 [$e txn -nowait]
- error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
-
- # ... and try to get a record from the first txn (should fail)
- set ret [catch {$db get -txn $txn2 $recno1} res]
- error_check_good dbget_wrong_record \
- [is_substr $res "deadlock"] 1
-
- # End transactions
- error_check_good txn1commit [$txn1 commit] 0
- how_many_locks 1 $e
- error_check_good txn2commit [$txn2 commit] 0
- # The number of locks stays the same here because the first
- # lock is released and the second lock was never granted.
- how_many_locks 1 $e
-
- # Test lock behavior for both abort and commit
- puts "\tLock005.b: Verify locks after abort or commit"
- foreach endorder {forward reverse} {
- end_order_test $db $e commit abort $endorder
- end_order_test $db $e abort commit $endorder
- end_order_test $db $e commit commit $endorder
- end_order_test $db $e abort abort $endorder
- }
-
- # Clean up
- error_check_good db_close [$db close] 0
- error_check_good env_close [$e close] 0
-}
-
-proc end_order_test { db e txn1end txn2end endorder } {
- # Start one transaction
- set txn1 [$e txn -nowait]
- error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
- set ret [catch {$db put -txn $txn1 -append record1} recno1]
- error_check_good dbput_txn1 $ret 0
-
- # Check number of locks
- how_many_locks 2 $e
-
- # Start a second transaction while first is still running
- set txn2 [$e txn -nowait]
- error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
- set ret [catch {$db put -txn $txn2 -append record2} recno2]
- error_check_good dbput_txn2 $ret 0
- how_many_locks 3 $e
-
- # Now commit or abort one txn and make sure the other is okay
- if {$endorder == "forward"} {
- # End transaction 1 first
- puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2"
- error_check_good txn_$txn1end [$txn1 $txn1end] 0
- how_many_locks 2 $e
-
- # txn1 is now ended, but txn2 is still running
- set ret1 [catch {$db get -txn $txn2 $recno1} res1]
- set ret2 [catch {$db get -txn $txn2 $recno2} res2]
- if { $txn1end == "commit" } {
- error_check_good txn2_sees_txn1 $ret1 0
- error_check_good txn2_sees_txn2 $ret2 0
- } else {
- # transaction 1 was aborted
- error_check_good txn2_cantsee_txn1 [llength $res1] 0
- }
-
- # End transaction 2 second
- error_check_good txn_$txn2end [$txn2 $txn2end] 0
- how_many_locks 1 $e
-
- # txn1 and txn2 should both now be invalid
- # The get no longer needs to be transactional
- set ret3 [catch {$db get $recno1} res3]
- set ret4 [catch {$db get $recno2} res4]
-
- if { $txn2end == "commit" } {
- error_check_good txn2_sees_txn1 $ret3 0
- error_check_good txn2_sees_txn2 $ret4 0
- error_check_good txn2_has_record2 \
- [is_substr $res4 "record2"] 1
- } else {
- # transaction 2 was aborted
- error_check_good txn2_cantsee_txn1 $ret3 0
- error_check_good txn2_aborted [llength $res4] 0
- }
-
- } elseif { $endorder == "reverse" } {
- # End transaction 2 first
- puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1"
- error_check_good txn_$txn2end [$txn2 $txn2end] 0
- how_many_locks 2 $e
-
- # txn2 is ended, but txn1 is still running
- set ret1 [catch {$db get -txn $txn1 $recno1} res1]
- set ret2 [catch {$db get -txn $txn1 $recno2} res2]
- if { $txn2end == "commit" } {
- error_check_good txn1_sees_txn1 $ret1 0
- error_check_good txn1_sees_txn2 $ret2 0
- } else {
- # transaction 2 was aborted
- error_check_good txn1_cantsee_txn2 [llength $res2] 0
- }
-
- # End transaction 1 second
- error_check_good txn_$txn1end [$txn1 $txn1end] 0
- how_many_locks 1 $e
-
- # txn1 and txn2 should both now be invalid
- # The get no longer needs to be transactional
- set ret3 [catch {$db get $recno1} res3]
- set ret4 [catch {$db get $recno2} res4]
-
- if { $txn1end == "commit" } {
- error_check_good txn1_sees_txn1 $ret3 0
- error_check_good txn1_sees_txn2 $ret4 0
- error_check_good txn1_has_record1 \
- [is_substr $res3 "record1"] 1
- } else {
- # transaction 1 was aborted
- error_check_good txn1_cantsee_txn2 $ret4 0
- error_check_good txn1_aborted [llength $res3] 0
- }
- }
-}
-
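-# Scan lock_stat output and verify that the current number of locks
-# equals $expected.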
-proc how_many_locks { expected env } {
- set stat [$env lock_stat]
- set str "Current number of locks"
- set checked 0
- foreach statpair $stat {
- if { $checked == 1 } {
- break
- }
- if { [is_substr [lindex $statpair 0] $str] != 0} {
- set checked 1
- set nlocks [lindex $statpair 1]
- error_check_good expected_nlocks $nlocks $expected
- }
- }
- error_check_good checked $checked 1
-}
diff --git a/storage/bdb/test/lockscript.tcl b/storage/bdb/test/lockscript.tcl
deleted file mode 100644
index f542c100b1f..00000000000
--- a/storage/bdb/test/lockscript.tcl
+++ /dev/null
@@ -1,117 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: lockscript.tcl,v 11.19 2004/01/28 03:36:28 bostic Exp $
-#
-# Random lock tester.
-# Usage: lockscript dir numiters numobjs sleepint degree readratio
-# dir: lock directory.
-# numiters: Total number of iterations.
-# numobjs: Number of objects on which to lock.
-# sleepint: Maximum sleep interval.
-# degree: Maximum number of locks to acquire at once
-# readratio: Percent of locks that should be reads.
-
-source ./include.tcl
-source $test_path/test.tcl
-
-set usage "lockscript dir numiters numobjs sleepint degree readratio"
-
-# Verify usage
-if { $argc != 6 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set dir [lindex $argv 0]
-set numiters [ lindex $argv 1 ]
-set numobjs [ lindex $argv 2 ]
-set sleepint [ lindex $argv 3 ]
-set degree [ lindex $argv 4 ]
-set readratio [ lindex $argv 5 ]
-
-# Initialize random number generator
-global rand_init
-berkdb srand $rand_init
-
-
-catch { berkdb_env -create -lock -home $dir } e
-error_check_good env_open [is_substr $e env] 1
-catch { $e lock_id } locker
-error_check_good locker [is_valid_locker $locker] TRUE
-
-puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
-puts "$sleepint $degree $readratio"
-flush stdout
-
-for { set iter 0 } { $iter < $numiters } { incr iter } {
- set nlocks [berkdb random_int 1 $degree]
- # We will always lock objects in ascending order to avoid
- # deadlocks.
- set lastobj 1
- set locklist {}
- set objlist {}
- for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
- # Pick lock parameters
- set obj [berkdb random_int $lastobj $numobjs]
- set lastobj [expr $obj + 1]
- set x [berkdb random_int 1 100 ]
- if { $x <= $readratio } {
- set rw read
- } else {
- set rw write
- }
- puts "[timestamp -c] $locker $lnum: $rw $obj"
-
- # Do get; add to list
- catch {$e lock_get $rw $locker $obj} lockp
- error_check_good lock_get [is_valid_lock $lockp $e] TRUE
-
-		# Create a file to flag that we have a lock of the given
- # type, after making sure only other read locks exist
- # (if we're read locking) or no other locks exist (if
- # we're writing).
- lock003_vrfy $rw $obj
- lock003_create $rw $obj
- lappend objlist [list $obj $rw]
-
- lappend locklist $lockp
- if {$lastobj > $numobjs} {
- break
- }
- }
- # Pick sleep interval
- puts "[timestamp -c] $locker sleeping"
- # We used to sleep 1 to $sleepint seconds. This makes the test
- # run for hours. Instead, make it sleep for 10 to $sleepint * 100
- # milliseconds, for a maximum sleep time of 0.5 s.
- after [berkdb random_int 10 [expr $sleepint * 100]]
- puts "[timestamp -c] $locker awake"
-
- # Now release locks
-	puts "[timestamp -c] $locker releasing locks"
-
- # Delete our locking flag files, then reverify. (Note that the
- # locking flag verification function assumes that our own lock
- # is not currently flagged.)
- foreach pair $objlist {
- set obj [lindex $pair 0]
- set rw [lindex $pair 1]
- lock003_destroy $obj
- lock003_vrfy $rw $obj
- }
-
- release_list $locklist
- flush stdout
-}
-
-set ret [$e close]
-error_check_good env_close $ret 0
-
-puts "[timestamp -c] $locker Complete"
-flush stdout
-
-exit
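[Editor's sketch, not part of the patch.] A hypothetical standalone invocation of lockscript.tcl, run from the build directory so that ./include.tcl resolves; TESTDIR is an assumed, pre-created lock-environment directory:

    # dir numiters numobjs sleepint degree readratio: 100 iterations
    # over 5 objects, sleep factor 2, at most 3 locks at once, 50% reads.
    exec tclsh lockscript.tcl TESTDIR 100 5 2 3 50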
diff --git a/storage/bdb/test/log001.tcl b/storage/bdb/test/log001.tcl
deleted file mode 100644
index cec09b84fd6..00000000000
--- a/storage/bdb/test/log001.tcl
+++ /dev/null
@@ -1,144 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: log001.tcl,v 11.34 2004/09/22 18:01:05 bostic Exp $
-#
-
-# TEST log001
-# TEST Read/write log records.
-# TEST Test with and without fixed-length, in-memory logging,
-# TEST and encryption.
-proc log001 { } {
- global passwd
- global has_crypto
- global rand_init
-
- berkdb srand $rand_init
- set iter 1000
-
- set max [expr 1024 * 128]
- foreach fixedlength { 0 1 } {
- foreach inmem { 1 0 } {
- log001_body $max $iter $fixedlength $inmem
- log001_body $max [expr $iter * 15] $fixedlength $inmem
-
- # Skip encrypted tests if not supported.
- if { $has_crypto == 0 } {
- continue
- }
- log001_body $max\
- $iter $fixedlength $inmem "-encryptaes $passwd"
- log001_body $max\
- [expr $iter * 15] $fixedlength $inmem "-encryptaes $passwd"
- }
- }
-}
-
-proc log001_body { max nrecs fixedlength inmem {encargs ""} } {
- source ./include.tcl
-
- puts -nonewline "Log001: Basic put/get log records: "
- if { $fixedlength == 1 } {
- puts -nonewline "fixed-length ($encargs)"
- } else {
- puts -nonewline "variable-length ($encargs)"
- }
-
- # In-memory logging requires a large enough log buffer that
- # any active transaction can be aborted.
- if { $inmem == 1 } {
- set lbuf [expr 8 * [expr 1024 * 1024]]
- puts " with in-memory logging."
- } else {
- puts " with on-disk logging."
- }
-
- env_cleanup $testdir
-
- set logargs ""
- if { $inmem == 1 } {
- set logargs "-log_inmemory -log_buffer $lbuf"
- }
- set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \
- $encargs $logargs -log_max $max]
- error_check_good envopen [is_valid_env $env] TRUE
-
- # We will write records to the log and make sure we can
- # read them back correctly. We'll use a standard pattern
- # repeated some number of times for each record.
- set lsn_list {}
- set rec_list {}
- puts "\tLog001.a: Writing $nrecs log records"
- for { set i 0 } { $i < $nrecs } { incr i } {
- set rec ""
- for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
- set rec $rec$i:logrec:$i
- }
- if { $fixedlength != 1 } {
- set rec $rec:[random_data 237 0 0]
- }
- set lsn [$env log_put $rec]
- error_check_bad log_put [is_substr $lsn log_cmd] 1
- lappend lsn_list $lsn
- lappend rec_list $rec
- }
-
- # Open a log cursor.
- set logc [$env log_cursor]
- error_check_good logc [is_valid_logc $logc $env] TRUE
-
- puts "\tLog001.b: Retrieving log records sequentially (forward)"
- set i 0
- for { set grec [$logc get -first] } { [llength $grec] != 0 } {
- set grec [$logc get -next]} {
- error_check_good log_get:seq [lindex $grec 1] \
- [lindex $rec_list $i]
- incr i
- }
-
- puts "\tLog001.c: Retrieving log records sequentially (backward)"
- set i [llength $rec_list]
- for { set grec [$logc get -last] } { [llength $grec] != 0 } {
- set grec [$logc get -prev] } {
- incr i -1
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $i]
- }
-
- puts "\tLog001.d: Retrieving log records sequentially by LSN"
- set i 0
- foreach lsn $lsn_list {
- set grec [$logc get -set $lsn]
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $i]
- incr i
- }
-
- puts "\tLog001.e: Retrieving log records randomly by LSN"
- set m [expr [llength $lsn_list] - 1]
- for { set i 0 } { $i < $nrecs } { incr i } {
- set recno [berkdb random_int 0 $m ]
- set lsn [lindex $lsn_list $recno]
- set grec [$logc get -set $lsn]
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
- }
-
- puts "\tLog001.f: Retrieving first/current, last/current log record"
- set grec [$logc get -first]
- error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
- set grec [$logc get -current]
- error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
- set i [expr [llength $rec_list] - 1]
- set grec [$logc get -last]
- error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
- set grec [$logc get -current]
- error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
-
- # Close and unlink the file
- error_check_good log_cursor:close:$logc [$logc close] 0
- error_check_good env:close [$env close] 0
- error_check_good envremove [berkdb envremove -home $testdir] 0
-}
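[Editor's sketch, not part of the patch.] The loop in Log001.b is the standard log-cursor scan; a minimal sketch of the same idiom, assuming an open environment handle $env with logging enabled:

    set logc [$env log_cursor]
    for { set grec [$logc get -first] } { [llength $grec] != 0 } \
        { set grec [$logc get -next] } {
        # Each $grec is a two-element list: {LSN record-data}.
        puts "lsn [lindex $grec 0]: [string length [lindex $grec 1]] bytes"
    }
    error_check_good logc_close [$logc close] 0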
diff --git a/storage/bdb/test/log002.tcl b/storage/bdb/test/log002.tcl
deleted file mode 100644
index 1c8f2b91979..00000000000
--- a/storage/bdb/test/log002.tcl
+++ /dev/null
@@ -1,102 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: log002.tcl,v 11.33 2004/09/22 18:01:05 bostic Exp $
-#
-# TEST log002
-# TEST Tests multiple logs
-# TEST Log truncation
-# TEST LSN comparison and file functionality.
-proc log002 { } {
- global rand_init
- error_check_good set_random_seed [berkdb srand $rand_init] 0
-
- foreach inmem { 1 0 } {
- log002_body $inmem
- }
-}
-
-proc log002_body { inmem } {
- source ./include.tcl
-
- puts "Log002: Multiple log test w/trunc, file, compare functionality"
-
- env_cleanup $testdir
-
- set max [expr 1024 * 128]
-
- set logargs ""
- if { $inmem == 0 } {
- puts "Log002: Using on-disk logging."
- } else {
- puts "Log002: Using in-memory logging."
- set lbuf [expr 8 * [expr 1024 * 1024]]
- set logargs "-log_inmemory -log_buffer $lbuf"
- }
- set env [eval {berkdb_env} -create -home $testdir -log \
- -mode 0644 $logargs -log_max $max]
- error_check_good envopen [is_valid_env $env] TRUE
-
- # We'll record every hundredth record for later use
- set info_list {}
-
- puts "\tLog002.a: Writing log records"
- set i 0
- for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
- set rec [random_data 120 0 0]
- set len [string length $rec]
- set lsn [$env log_put $rec]
-
- if { [expr $i % 100 ] == 0 } {
- lappend info_list [list $lsn $rec]
- }
- incr i
- }
-
- puts "\tLog002.b: Checking log_compare"
- set last {0 0}
- foreach p $info_list {
- set l [lindex $p 0]
- if { [llength $last] != 0 } {
- error_check_good \
- log_compare [$env log_compare $l $last] 1
- error_check_good \
- log_compare [$env log_compare $last $l] -1
- error_check_good \
- log_compare [$env log_compare $l $l] 0
- }
- set last $l
- }
-
- puts "\tLog002.c: Checking log_file"
- if { $inmem == 0 } {
- set flist [glob $testdir/log*]
- foreach p $info_list {
- set lsn [lindex $p 0]
- set f [$env log_file $lsn]
-
- # Change backslash separators on Windows to forward
- # slashes, as the rest of the test suite expects.
- regsub -all {\\} $f {/} f
- error_check_bad log_file:$f [lsearch $flist $f] -1
- }
- }
-
- puts "\tLog002.d: Verifying records"
-
- set logc [$env log_cursor]
- error_check_good log_cursor [is_valid_logc $logc $env] TRUE
-
- for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
- set p [lindex $info_list $i]
- set grec [$logc get -set [lindex $p 0]]
- error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
- }
-
- # Close and unlink the file
- error_check_good log_cursor:close:$logc [$logc close] 0
- error_check_good env:close [$env close] 0
- error_check_good envremove [berkdb envremove -home $testdir] 0
-}
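[Editor's sketch, not part of the patch.] Log002.b depends on log_compare ordering LSNs the way strcmp orders strings; a sketch, assuming $a and $b are LSNs returned by earlier log_put calls on an open environment $env:

    switch -- [$env log_compare $a $b] {
        -1 { puts "record at $a was written before record at $b" }
        0  { puts "$a and $b are the same LSN" }
        1  { puts "record at $a was written after record at $b" }
    }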
diff --git a/storage/bdb/test/log003.tcl b/storage/bdb/test/log003.tcl
deleted file mode 100644
index e8d10dbfc9b..00000000000
--- a/storage/bdb/test/log003.tcl
+++ /dev/null
@@ -1,144 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: log003.tcl,v 11.34 2004/09/22 18:01:05 bostic Exp $
-#
-# TEST log003
-# TEST Verify that log_flush is flushing records correctly.
-proc log003 { } {
- global rand_init
- error_check_good set_random_seed [berkdb srand $rand_init] 0
-
- # Even though log_flush doesn't do anything for in-memory
- # logging, we want to make sure calling it doesn't break
- # anything.
- foreach inmem { 1 0 } {
- log003_body $inmem
- }
-}
-
-proc log003_body { inmem } {
- source ./include.tcl
-
- puts -nonewline "Log003: Verify log_flush behavior"
- if { $inmem == 0 } {
- puts " (on-disk logging)."
- } else {
- puts " (in-memory logging)."
- }
-
- set max [expr 1024 * 128]
- env_cleanup $testdir
- set short_rec "abcdefghijklmnopqrstuvwxyz"
- set long_rec [repeat $short_rec 200]
- set very_long_rec [repeat $long_rec 4]
-
- foreach rec "$short_rec $long_rec $very_long_rec" {
- puts "\tLog003.a: Verify flush on [string length $rec] byte rec"
-
- set logargs ""
- if { $inmem == 1 } {
- set logargs "-log_inmemory -log_buffer [expr $max * 2]"
- }
- set env [eval {berkdb_env} -log -home $testdir -create \
- -mode 0644 $logargs -log_max $max]
- error_check_good envopen [is_valid_env $env] TRUE
-
- set lsn [$env log_put $rec]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- set ret [$env log_flush $lsn]
- error_check_good log_flush $ret 0
-
- # Now, we want to crash the region and recheck. Closing the
- # log does not flush any records, so we'll use a close to
- # do the "crash"
- set ret [$env close]
- error_check_good log_env:close $ret 0
-
- # Now, remove the log region
- #set ret [berkdb envremove -home $testdir]
- #error_check_good env:remove $ret 0
-
- # Re-open the log and try to read the record.
- set env [berkdb_env -create -home $testdir \
- -log -mode 0644 -log_max $max]
- error_check_good envopen [is_valid_env $env] TRUE
-
- set logc [$env log_cursor]
- error_check_good log_cursor [is_valid_logc $logc $env] TRUE
-
- set gotrec [$logc get -first]
- error_check_good lp_get [lindex $gotrec 1] $rec
-
- # Close and unlink the file
- error_check_good log_cursor:close:$logc [$logc close] 0
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove [berkdb envremove -home $testdir] 0
- log_cleanup $testdir
- }
-
- if { $inmem == 1 } {
- puts "Log003: Skipping remainder of test for in-memory logging."
- return
- }
-
- foreach rec "$short_rec $long_rec $very_long_rec" {
- puts "\tLog003.b: \
- Verify flush on non-last record [string length $rec]"
-
- set env [berkdb_env -log -home $testdir \
- -create -mode 0644 -log_max $max]
-
- error_check_good envopen [is_valid_env $env] TRUE
-
- # Put 10 random records
- for { set i 0 } { $i < 10 } { incr i} {
- set r [random_data 450 0 0]
- set lsn [$env log_put $r]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- }
-
- # Put the record we are interested in
- set save_lsn [$env log_put $rec]
- error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
-
- # Put 10 more random records
- for { set i 0 } { $i < 10 } { incr i} {
- set r [random_data 450 0 0]
- set lsn [$env log_put $r]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- }
-
- # Now check the flush
- set ret [$env log_flush $save_lsn]
- error_check_good log_flush $ret 0
-
- # Now, we want to crash the region and recheck. Closing the
- # log does not flush any records, so we'll use a close to
- # do the "crash".
- #
- # Now, close and remove the log region
- error_check_good env:close:$env [$env close] 0
- set ret [berkdb envremove -home $testdir]
- error_check_good env:remove $ret 0
-
- # Re-open the log and try to read the record.
- set env [berkdb_env -log -home $testdir \
- -create -mode 0644 -log_max $max]
- error_check_good envopen [is_valid_env $env] TRUE
-
- set logc [$env log_cursor]
- error_check_good log_cursor [is_valid_logc $logc $env] TRUE
-
- set gotrec [$logc get -set $save_lsn]
- error_check_good lp_get [lindex $gotrec 1] $rec
-
- # Close and unlink the file
- error_check_good log_cursor:close:$logc [$logc close] 0
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove [berkdb envremove -home $testdir] 0
- log_cleanup $testdir
- }
-}
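[Editor's sketch, not part of the patch.] The durability property Log003 exercises reduces to a few lines; a sketch, assuming an open on-disk logging environment $env:

    # Flush through a saved LSN; after the close-without-flush "crash"
    # used above, any record at or before that LSN must still be
    # readable from the log.
    set save_lsn [$env log_put "must survive"]
    error_check_good log_flush [$env log_flush $save_lsn] 0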
diff --git a/storage/bdb/test/log004.tcl b/storage/bdb/test/log004.tcl
deleted file mode 100644
index 15af405f5ce..00000000000
--- a/storage/bdb/test/log004.tcl
+++ /dev/null
@@ -1,52 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: log004.tcl,v 11.31 2004/07/19 16:08:36 carol Exp $
-#
-
-# TEST log004
-# TEST Make sure that if we do PREVs on a log, but the beginning of the
-# TEST log has been truncated, we do the right thing.
-proc log004 { } {
- foreach inmem { 1 0 } {
- log004_body $inmem
- }
-}
-
-proc log004_body { inmem } {
- source ./include.tcl
-
- puts "Log004: Prev on log when beginning of log has been truncated."
- # Use archive test to populate log
- env_cleanup $testdir
- puts "\tLog004.a: Call archive to populate log."
- archive $inmem
-
- # Delete all log files under 100
- puts "\tLog004.b: Delete all log files under 100."
- set ret [catch { glob $testdir/log.00000000* } result]
- if { $ret == 0 } {
- eval fileremove -f $result
- }
-
- # Now open the log and get the first record and try a prev
- puts "\tLog004.c: Open truncated log, attempt to access missing portion."
- set env [berkdb_env -create -log -home $testdir]
- error_check_good envopen [is_valid_env $env] TRUE
-
- set logc [$env log_cursor]
- error_check_good log_cursor [is_valid_logc $logc $env] TRUE
-
- set ret [$logc get -first]
- error_check_bad log_get [llength $ret] 0
-
- # This should give DB_NOTFOUND which is a ret of length 0
- catch {$logc get -prev} ret
- error_check_good log_get_prev [string length $ret] 0
-
- puts "\tLog004.d: Close log and environment."
- error_check_good log_cursor_close [$logc close] 0
- error_check_good log_close [$env close] 0
-}
diff --git a/storage/bdb/test/log005.tcl b/storage/bdb/test/log005.tcl
deleted file mode 100644
index ea6e3fa3304..00000000000
--- a/storage/bdb/test/log005.tcl
+++ /dev/null
@@ -1,118 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: log005.tcl,v 11.6 2004/09/22 18:01:05 bostic Exp $
-#
-# TEST log005
-# TEST Check that log file sizes can change on the fly.
-proc log005 { } {
-
- # Skip the test for HP-UX, where we can't do the second
- # env open.
- global is_hp_test
- if { $is_hp_test == 1 } {
- puts "Log005: Skipping for HP-UX."
- return
- }
-
- foreach inmem { 1 0 } {
- log005_body $inmem
- }
-}
-proc log005_body { inmem } {
- source ./include.tcl
- env_cleanup $testdir
-
- puts -nonewline "Log005: Check that log file sizes can change"
- if { $inmem == 0 } {
- puts " (on-disk logging)."
- } else {
- puts " (in-memory logging)."
- }
-
- # Open the environment, set and check the log file size.
- puts "\tLog005.a: open, set and check the log file size."
- set logargs ""
- if { $inmem == 1 } {
- set lbuf [expr 1024 * 1024]
- set logargs "-log_inmemory -log_buffer $lbuf"
- }
- set env [eval {berkdb_env} -create -home $testdir \
- $logargs -log_max 1000000 -txn]
- error_check_good envopen [is_valid_env $env] TRUE
- set db [berkdb_open \
- -env $env -create -mode 0644 -btree -auto_commit a.db]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Get the current log file maximum.
- set max [log005_stat $env "Current log file size"]
- error_check_good max_set $max 1000000
-
- # Reset the log file size using a second open, and make sure
- # it changes.
- puts "\tLog005.b: reset during open, check the log file size."
- set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
- error_check_good envtmp_open [is_valid_env $envtmp] TRUE
- error_check_good envtmp_close [$envtmp close] 0
-
- set tmp [log005_stat $env "Current log file size"]
- error_check_good max_changed 900000 $tmp
-
-	puts "\tLog005.c: fill the current log file."
-	# Fill the current log file.
- set new_lsn 0
- set data [repeat "a" 1024]
- for { set i 1 } \
- { [log005_stat $env "Current log file number"] != 2 } \
- { incr i } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set ret [$db put -txn $t $i $data]
- error_check_good put $ret 0
- error_check_good txn [$t commit] 0
-
- set last_lsn $new_lsn
- set new_lsn [log005_stat $env "Current log file offset"]
- }
-
- # The last LSN in the first file should be more than our new
- # file size.
- error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1
-
- # Close down the environment.
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
-
- if { $inmem == 1 } {
- puts "Log005: Skipping remainder of test for in-memory logging."
- return
- }
-
- puts "\tLog005.d: check the log file size is unchanged after recovery."
- # Open again, running recovery. Verify the log file size is as we
- # left it.
- set env [berkdb_env -create -home $testdir -recover -txn]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set tmp [log005_stat $env "Current log file size"]
- error_check_good after_recovery 900000 $tmp
-
- error_check_good env_close [$env close] 0
-}
-
-# log005_stat --
-# Return the current log statistics.
-proc log005_stat { env s } {
- set stat [$env log_stat]
- foreach statpair $stat {
- set statmsg [lindex $statpair 0]
- set statval [lindex $statpair 1]
- if {[is_substr $statmsg $s] != 0} {
- return $statval
- }
- }
- puts "FAIL: log005: stat string $s not found"
- return 0
-}
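[Editor's sketch, not part of the patch.] log005_stat is the same substring scan used in how_many_locks earlier; hypothetical usage against an open transactional environment handle $env:

    set fsize [log005_stat $env "Current log file size"]
    set fnum [log005_stat $env "Current log file number"]
    puts "log file $fnum, maximum size $fsize"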
diff --git a/storage/bdb/test/logtrack.tcl b/storage/bdb/test/logtrack.tcl
deleted file mode 100644
index 50851932fc4..00000000000
--- a/storage/bdb/test/logtrack.tcl
+++ /dev/null
@@ -1,139 +0,0 @@
-# See the file LICENSE for redistribution information
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: logtrack.tcl,v 11.15 2004/04/14 16:08:42 carol Exp $
-#
-# logtrack.tcl: A collection of routines, formerly implemented in Perl
-# as log.pl, to track which log record types the test suite hits.
-
-set ltsname "logtrack_seen.db"
-set ltlist $test_path/logtrack.list
-set tmpname "logtrack_tmp"
-
-proc logtrack_clean { } {
- global ltsname
-
- file delete -force $ltsname
-
- return
-}
-
-proc logtrack_init { } {
- global ltsname
-
- logtrack_clean
-
- # Create an empty tracking database.
- [berkdb_open -create -truncate -btree $ltsname] close
-
- return
-}
-
-# Dump the logs for directory dirname and record which log
-# records were seen.
-proc logtrack_read { dirname } {
- global ltsname tmpname util_path
- global encrypt passwd
-
- set seendb [berkdb_open $ltsname]
- error_check_good seendb_open [is_valid_db $seendb] TRUE
-
- file delete -force $tmpname
- set pargs " -N -h $dirname "
- if { $encrypt > 0 } {
- append pargs " -P $passwd "
- }
- set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res]
- error_check_good printlog $ret 0
- error_check_good tmpfile_exists [file exists $tmpname] 1
-
- set f [open $tmpname r]
- while { [gets $f record] >= 0 } {
- set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
- if { $r == 1 } {
- error_check_good seendb_put [$seendb put $name ""] 0
- }
- }
- close $f
- file delete -force $tmpname
-
- error_check_good seendb_close [$seendb close] 0
-}
-
-# Print the log record types that were seen but should not have been
-# seen and the log record types that were not seen but should have been seen.
-proc logtrack_summary { } {
- global ltsname ltlist testdir
- global one_test
-
- set seendb [berkdb_open $ltsname]
- error_check_good seendb_open [is_valid_db $seendb] TRUE
- set existdb [berkdb_open -create -btree]
- error_check_good existdb_open [is_valid_db $existdb] TRUE
- set deprecdb [berkdb_open -create -btree]
- error_check_good deprecdb_open [is_valid_db $deprecdb] TRUE
-
- error_check_good ltlist_exists [file exists $ltlist] 1
- set f [open $ltlist r]
- set pref ""
- while { [gets $f line] >= 0 } {
- # Get the keyword, the first thing on the line:
- # BEGIN/DEPRECATED/IGNORED/PREFIX
- set keyword [lindex $line 0]
-
- if { [string compare $keyword PREFIX] == 0 } {
- # New prefix.
- set pref [lindex $line 1]
- } elseif { [string compare $keyword BEGIN] == 0 } {
- # A log type we care about; put it on our list.
-
- # Skip noop and debug.
- if { [string compare [lindex $line 1] noop] == 0 } {
- continue
- }
- if { [string compare [lindex $line 1] debug] == 0 } {
- continue
- }
-
- error_check_good exist_put [$existdb put \
- ${pref}_[lindex $line 1] ""] 0
- } elseif { [string compare $keyword DEPRECATED] == 0 ||
- [string compare $keyword IGNORED] == 0 } {
- error_check_good deprec_put [$deprecdb put \
- ${pref}_[lindex $line 1] ""] 0
- }
- }
-
- error_check_good exist_curs \
- [is_valid_cursor [set ec [$existdb cursor]] $existdb] TRUE
- while { [llength [set dbt [$ec get -next]]] != 0 } {
- set rec [lindex [lindex $dbt 0] 0]
- if { [$seendb count $rec] == 0 && $one_test == "ALL" } {
- puts "WARNING: log record type $rec: not tested"
- }
- }
- error_check_good exist_curs_close [$ec close] 0
-
-	error_check_good seen_curs \
-	    [is_valid_cursor [set sc [$seendb cursor]] $seendb] TRUE
- while { [llength [set dbt [$sc get -next]]] != 0 } {
- set rec [lindex [lindex $dbt 0] 0]
- if { [$existdb count $rec] == 0 } {
- if { [$deprecdb count $rec] == 0 } {
- puts "WARNING: log record type $rec: unknown"
- } else {
- puts \
- "WARNING: log record type $rec: deprecated"
- }
- }
- }
- error_check_good seen_curs_close [$sc close] 0
-
- error_check_good seendb_close [$seendb close] 0
- error_check_good existdb_close [$existdb close] 0
- error_check_good deprecdb_close [$deprecdb close] 0
-
- logtrack_clean
-}
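[Editor's sketch, not part of the patch.] The regexp in logtrack_read extracts the record-type name from db_printlog output; a stand-alone sketch with an illustrative, hand-written log line (the record type and fields are assumptions, not captured output):

    # db_printlog lines have the form "[file][offset]type: fields...".
    set record {[1][28]__txn_regop: rec: 10 txnid 80000001}
    if { [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name] } {
        puts $name    ;# prints __txn_regop
    }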
diff --git a/storage/bdb/test/mdbscript.tcl b/storage/bdb/test/mdbscript.tcl
deleted file mode 100644
index 88433485a94..00000000000
--- a/storage/bdb/test/mdbscript.tcl
+++ /dev/null
@@ -1,400 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mdbscript.tcl,v 11.33 2004/01/28 03:36:28 bostic Exp $
-#
-# Process script for the multi-process db tester.
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-global dbenv
-global klock
-global l_keys
-global procid
-global alphabet
-
-# In Tcl, when there are multiple catch handlers, *all* handlers
-# are called, so we have to resort to this hack.
-#
-global exception_handled
-
-set exception_handled 0
-
-set datastr $alphabet$alphabet
-
-# Usage: mdbscript method dir file nentries iter procid procs
-# method: access method to use
-# dir: DBHOME directory
-# file: db file on which to operate
-# nentries: number of entries taken from dictionary
-# iter: number of operations to run
-# procid: this process's id number
-# procs: total number of processes running
-set usage "mdbscript method dir file nentries iter procid procs"
-
-# Verify usage
-if { $argc != 7 } {
- puts "FAIL:[timestamp] test042: Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set method [lindex $argv 0]
-set dir [lindex $argv 1]
-set file [lindex $argv 2]
-set nentries [ lindex $argv 3 ]
-set iter [ lindex $argv 4 ]
-set procid [ lindex $argv 5 ]
-set procs [ lindex $argv 6 ]
-
-set pflags ""
-set gflags ""
-set txn ""
-
-set renum [is_rrecno $method]
-set omethod [convert_method $method]
-
-if { [is_record_based $method] == 1 } {
- append gflags " -recno"
-}
-
-# Initialize seed
-global rand_init
-
-# We want repeatable results, but we also want each instance of mdbscript
-# to do something different. So we add the procid to the fixed seed.
-# (Note that this is a serial number given by the caller, not a pid.)
-berkdb srand [expr $rand_init + $procid]
-
-puts "Beginning execution for [pid] $method"
-puts "$dir db_home"
-puts "$file database"
-puts "$nentries data elements"
-puts "$iter iterations"
-puts "$procid process id"
-puts "$procs processes"
-
-set klock NOLOCK
-
-# Note: all I/O operations, and especially flush, are expensive
-# on Win2000, at least with Tcl version 8.3.2, so we'll avoid
-# flushes in the main part of the loop below.
-flush stdout
-
-set dbenv [berkdb_env -create -cdb -home $dir]
-#set dbenv [berkdb_env -create -cdb -log -home $dir]
-error_check_good dbenv [is_valid_env $dbenv] TRUE
-
-set locker [ $dbenv lock_id ]
-
-set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file]
-error_check_good dbopen [is_valid_db $db] TRUE
-
-# Init globals (no data)
-set nkeys [db_init $db 0]
-puts "Initial number of keys: $nkeys"
-tclsleep 5
-
-proc get_lock { k } {
- global dbenv
- global procid
- global locker
- global klock
- global DB_LOCK_WRITE
- global DB_LOCK_NOWAIT
- global errorInfo
- global exception_handled
- # Make sure that the key isn't in the middle of
- # a delete operation
- if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } {
- set exception_handled 1
-
- error_check_good \
- get_lock [is_substr $errorInfo "DB_LOCK_NOTGRANTED"] 1
- puts "Warning: key $k locked"
- set klock NOLOCK
- return 1
- } else {
- error_check_good get_lock [is_valid_lock $klock $dbenv] TRUE
- }
- return 0
-}
-
-# If we are renumbering, then each time we delete an item, the number of
-# items in the file is temporarily decreased, so the highest record numbers
-# do not exist. To avoid requesting records that do not exist, we never
-# generate the highest few record numbers as keys.
-#
-# For record-based methods, record numbers begin at 1, while for other keys,
-# we begin at 0 to index into an array.
-proc rand_key { method nkeys renum procs} {
- if { $renum == 1 } {
- return [berkdb random_int 1 [expr $nkeys - $procs]]
- } elseif { [is_record_based $method] == 1 } {
- return [berkdb random_int 1 $nkeys]
- } else {
- return [berkdb random_int 0 [expr $nkeys - 1]]
- }
-}
-
-# On each iteration we're going to randomly pick a key.
-# 1. We'll either get it (verifying that its contents are reasonable).
-# 2. Put it (using an overwrite to make the data be datastr:ID).
-# 3. Get it and do a put through the cursor, tacking our ID on to the end.
-# 4. Get it, read forward some random number of keys.
-# 5. Get it, read forward some random number of keys and do a put (replace).
-# 6. Get it, read forward some random number of keys and do a del. And then
-# do a put of the key.
-set gets 0
-set getput 0
-set overwrite 0
-set seqread 0
-set seqput 0
-set seqdel 0
-set dlen [string length $datastr]
-
-for { set i 0 } { $i < $iter } { incr i } {
- set op [berkdb random_int 0 5]
- puts "iteration $i operation $op"
- set close_cursor 0
- if {[catch {
- switch $op {
- 0 {
- incr gets
- set k [rand_key $method $nkeys $renum $procs]
- if {[is_record_based $method] == 1} {
- set key $k
- } else {
- set key [lindex $l_keys $k]
- }
-
- if { [get_lock $key] == 1 } {
- incr i -1
- continue;
- }
-
- set rec [eval {$db get} $txn $gflags {$key}]
- error_check_bad "$db get $key" [llength $rec] 0
- set partial [string range \
- [lindex [lindex $rec 0] 1] 0 [expr $dlen - 1]]
- error_check_good \
- "$db get $key" $partial [pad_data $method $datastr]
- }
- 1 {
- incr overwrite
- set k [rand_key $method $nkeys $renum $procs]
- if {[is_record_based $method] == 1} {
- set key $k
- } else {
- set key [lindex $l_keys $k]
- }
-
- set data $datastr:$procid
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $data]}]
- error_check_good "$db put $key" $ret 0
- }
- 2 {
- incr getput
- set dbc [$db cursor -update]
- error_check_good "$db cursor" \
- [is_valid_cursor $dbc $db] TRUE
- set close_cursor 1
- set k [rand_key $method $nkeys $renum $procs]
- if {[is_record_based $method] == 1} {
- set key $k
- } else {
- set key [lindex $l_keys $k]
- }
-
- if { [get_lock $key] == 1 } {
- incr i -1
- error_check_good "$dbc close" \
- [$dbc close] 0
- set close_cursor 0
- continue;
- }
-
- set ret [$dbc get -set $key]
- error_check_good \
- "$dbc get $key" [llength [lindex $ret 0]] 2
- set rec [lindex [lindex $ret 0] 1]
- set partial [string range $rec 0 [expr $dlen - 1]]
- error_check_good \
- "$dbc get $key" $partial [pad_data $method $datastr]
- append rec ":$procid"
- set ret [$dbc put \
- -current [chop_data $method $rec]]
- error_check_good "$dbc put $key" $ret 0
- error_check_good "$dbc close" [$dbc close] 0
- set close_cursor 0
- }
- 3 -
- 4 -
- 5 {
- if { $op == 3 } {
- set flags ""
- } else {
- set flags -update
- }
- set dbc [eval {$db cursor} $flags]
- error_check_good "$db cursor" \
- [is_valid_cursor $dbc $db] TRUE
- set close_cursor 1
- set k [rand_key $method $nkeys $renum $procs]
- if {[is_record_based $method] == 1} {
- set key $k
- } else {
- set key [lindex $l_keys $k]
- }
-
- if { [get_lock $key] == 1 } {
- incr i -1
- error_check_good "$dbc close" \
- [$dbc close] 0
- set close_cursor 0
- continue;
- }
-
- set ret [$dbc get -set $key]
- error_check_good \
- "$dbc get $key" [llength [lindex $ret 0]] 2
-
- # Now read a few keys sequentially
- set nloop [berkdb random_int 0 10]
- if { [berkdb random_int 0 1] == 0 } {
- set flags -next
- } else {
- set flags -prev
- }
- while { $nloop > 0 } {
- set lastret $ret
- set ret [eval {$dbc get} $flags]
- # Might read beginning/end of file
- if { [llength $ret] == 0} {
- set ret $lastret
- break
- }
- incr nloop -1
- }
- switch $op {
- 3 {
- incr seqread
- }
- 4 {
- incr seqput
- set rec [lindex [lindex $ret 0] 1]
- set partial [string range $rec 0 \
- [expr $dlen - 1]]
- error_check_good "$dbc get $key" \
- $partial [pad_data $method $datastr]
- append rec ":$procid"
- set ret [$dbc put -current \
- [chop_data $method $rec]]
- error_check_good \
- "$dbc put $key" $ret 0
- }
- 5 {
- incr seqdel
- set k [lindex [lindex $ret 0] 0]
- # We need to lock the item we're
- # deleting so that someone else can't
- # try to do a get while we're
- # deleting
- error_check_good "$klock put" \
- [$klock put] 0
- set klock NOLOCK
- set cur [$dbc get -current]
- error_check_bad get_current \
- [llength $cur] 0
- set key [lindex [lindex $cur 0] 0]
- if { [get_lock $key] == 1 } {
- incr i -1
- error_check_good "$dbc close" \
- [$dbc close] 0
- set close_cursor 0
- continue
- }
- set ret [$dbc del]
- error_check_good "$dbc del" $ret 0
- set rec $datastr
- append rec ":$procid"
- if { $renum == 1 } {
- set ret [$dbc put -before \
- [chop_data $method $rec]]
- error_check_good \
- "$dbc put $k" $ret $k
- } elseif { \
- [is_record_based $method] == 1 } {
- error_check_good "$dbc close" \
- [$dbc close] 0
- set close_cursor 0
- set ret [$db put $k \
- [chop_data $method $rec]]
- error_check_good \
- "$db put $k" $ret 0
- } else {
- set ret [$dbc put -keylast $k \
- [chop_data $method $rec]]
- error_check_good \
- "$dbc put $k" $ret 0
- }
- }
- }
- if { $close_cursor == 1 } {
- error_check_good \
- "$dbc close" [$dbc close] 0
- set close_cursor 0
- }
- }
- }
- } res] != 0} {
- global errorInfo;
- global exception_handled;
-
- puts $errorInfo
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
-
- if { [string compare $klock NOLOCK] != 0 } {
- catch {$klock put}
- }
- if {$close_cursor == 1} {
- catch {$dbc close}
- set close_cursor 0
- }
-
- if {[string first FAIL $theError] == 0 && \
- $exception_handled != 1} {
- flush stdout
- error "FAIL:[timestamp] test042: key $k: $theError"
- }
- set exception_handled 0
- } else {
- if { [string compare $klock NOLOCK] != 0 } {
- error_check_good "$klock put" [$klock put] 0
- set klock NOLOCK
- }
- }
-}
-
-error_check_good db_close_catch [catch {$db close} ret] 0
-error_check_good db_close $ret 0
-error_check_good dbenv_close [$dbenv close] 0
-
-flush stdout
-
-puts "[timestamp] [pid] Complete"
-puts "Successful ops: "
-puts "\t$gets gets"
-puts "\t$overwrite overwrites"
-puts "\t$getput getputs"
-puts "\t$seqread seqread"
-puts "\t$seqput seqput"
-puts "\t$seqdel seqdel"
-flush stdout
-exit
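[Editor's sketch, not part of the patch.] The -nowait probe in get_lock above is the standard way to test a lock without blocking; a minimal sketch, assuming an open locking environment $dbenv and a hypothetical object name KEY:

    set locker [$dbenv lock_id]
    if { [catch {$dbenv lock_get -nowait write $locker KEY} lk] != 0 } {
        # DB_LOCK_NOTGRANTED: another locker holds KEY; skip it.
    } else {
        # Lock granted; release it when done.
        error_check_good lock_put [$lk put] 0
    }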
diff --git a/storage/bdb/test/memp001.tcl b/storage/bdb/test/memp001.tcl
deleted file mode 100644
index 4818072debd..00000000000
--- a/storage/bdb/test/memp001.tcl
+++ /dev/null
@@ -1,199 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: memp001.tcl,v 11.53 2004/01/28 03:36:28 bostic Exp $
-#
-
-# TEST memp001
-# TEST Randomly updates pages.
-proc memp001 { } {
-
- memp001_body 1 ""
- memp001_body 3 ""
- memp001_body 1 -private
- memp001_body 3 -private
- memp001_body 1 "-system_mem -shm_key 1"
- memp001_body 3 "-system_mem -shm_key 1"
-
-}
-
-proc memp001_body { ncache flags } {
- source ./include.tcl
- global rand_init
-
- set nfiles 5
- set iter 500
- set psize 512
- set cachearg "-cachesize {0 400000 $ncache}"
-
- puts \
-"Memp001: { $flags } random update $iter iterations on $nfiles files."
- #
- # Check if this platform supports this set of flags
- #
- if { [mem_chk $flags] == 1 } {
- return
- }
-
- env_cleanup $testdir
- puts "\tMemp001.a: Create env with $ncache caches"
- set env [eval {berkdb_env -create -mode 0644} \
- $cachearg {-home $testdir} $flags]
- error_check_good env_open [is_valid_env $env] TRUE
-
- #
- # Do a simple mpool_stat call to verify the number of caches
- # just to exercise the stat code.
- set stat [$env mpool_stat]
- set str "Number of caches"
- set checked 0
- foreach statpair $stat {
- if { $checked == 1 } {
- break
- }
- if { [is_substr [lindex $statpair 0] $str] != 0} {
- set checked 1
- error_check_good ncache [lindex $statpair 1] $ncache
- }
- }
- error_check_good checked $checked 1
-
- # Open N memp files
- puts "\tMemp001.b: Create $nfiles mpool files"
- for {set i 1} {$i <= $nfiles} {incr i} {
- set fname "data_file.$i"
- file_create $testdir/$fname 50 $psize
-
- set mpools($i) \
- [$env mpool -create -pagesize $psize -mode 0644 $fname]
- error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
- }
-
- # Now, loop, picking files at random
- berkdb srand $rand_init
- puts "\tMemp001.c: Random page replacement loop"
- for {set i 0} {$i < $iter} {incr i} {
- set mpool $mpools([berkdb random_int 1 $nfiles])
- set p(1) [get_range $mpool 10]
- set p(2) [get_range $mpool 10]
- set p(3) [get_range $mpool 10]
- set p(1) [replace $mpool $p(1)]
- set p(3) [replace $mpool $p(3)]
- set p(4) [get_range $mpool 20]
- set p(4) [replace $mpool $p(4)]
- set p(5) [get_range $mpool 10]
- set p(6) [get_range $mpool 20]
- set p(7) [get_range $mpool 10]
- set p(8) [get_range $mpool 20]
- set p(5) [replace $mpool $p(5)]
- set p(6) [replace $mpool $p(6)]
- set p(9) [get_range $mpool 40]
- set p(9) [replace $mpool $p(9)]
- set p(10) [get_range $mpool 40]
- set p(7) [replace $mpool $p(7)]
- set p(8) [replace $mpool $p(8)]
- set p(9) [replace $mpool $p(9)]
- set p(10) [replace $mpool $p(10)]
- #
- # We now need to put all the pages we have here or
- # else they end up pinned.
- #
- for {set x 1} { $x <= 10} {incr x} {
- error_check_good pgput [$p($x) put] 0
- }
- }
-
- # Close N memp files, close the environment.
- puts "\tMemp001.d: Close mpools"
- for {set i 1} {$i <= $nfiles} {incr i} {
- error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
- }
- error_check_good envclose [$env close] 0
-
- for {set i 1} {$i <= $nfiles} {incr i} {
- fileremove -f $testdir/data_file.$i
- }
-}
-
-proc file_create { fname nblocks blocksize } {
- set fid [open $fname w]
- for {set i 0} {$i < $nblocks} {incr i} {
- seek $fid [expr $i * $blocksize] start
- puts -nonewline $fid $i
- }
- seek $fid [expr $nblocks * $blocksize - 1]
-
- # We don't end the file with a newline, because some platforms (like
- # Windows) emit CR/NL. There does not appear to be a BINARY open flag
- # that prevents this.
- puts -nonewline $fid "Z"
- close $fid
-
- # Make sure it worked
- if { [file size $fname] != $nblocks * $blocksize } {
- error "FAIL: file_create could not create correct file size"
- }
-}
-
-proc get_range { mpool max } {
- set pno [berkdb random_int 0 $max]
- set p [$mpool get $pno]
- error_check_good page [is_valid_page $p $mpool] TRUE
- set got [$p pgnum]
- if { $got != $pno } {
- puts "Get_range: Page mismatch page |$pno| val |$got|"
- }
- set ret [$p init "Page is pinned by [pid]"]
- error_check_good page_init $ret 0
-
- return $p
-}
-
-proc replace { mpool p } {
- set pgno [$p pgnum]
-
- set ret [$p init "Page is unpinned by [pid]"]
- error_check_good page_init $ret 0
-
- set ret [$p put -dirty]
- error_check_good page_put $ret 0
-
- set p2 [$mpool get $pgno]
- error_check_good page [is_valid_page $p2 $mpool] TRUE
-
- return $p2
-}
-
-proc mem_chk { flags } {
- source ./include.tcl
- global errorCode
-
- # Open the memp with region init specified
- env_cleanup $testdir
-
- set cachearg " -cachesize {0 400000 3}"
- set ret [catch {eval {berkdb_env_noerr -create -mode 0644}\
- $cachearg {-region_init -home $testdir} $flags} env]
- if { $ret != 0 } {
- # If the env open failed, it may be because we're on a platform
- # such as HP-UX 10 that won't support mutexes in shmget memory.
- # Or QNX, which doesn't support system memory at all.
- # Verify that the return value was EINVAL or EOPNOTSUPP
- # and bail gracefully.
- error_check_good is_shm_test [is_substr $flags -system_mem] 1
- error_check_good returned_error [expr \
- [is_substr $errorCode EINVAL] || \
- [is_substr $errorCode EOPNOTSUPP]] 1
- puts "Warning:\
- platform does not support mutexes in shmget memory."
- puts "Skipping shared memory mpool test."
- return 1
- }
- error_check_good env_open [is_valid_env $env] TRUE
- error_check_good env_close [$env close] 0
- env_cleanup $testdir
-
- return 0
-}
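[Editor's sketch, not part of the patch.] get_range and replace above implement a pin/modify/unpin cycle; the same mpool life cycle in miniature, assuming an open environment handle $env (the file name is illustrative):

    set mp [$env mpool -create -pagesize 512 -mode 0644 sketch.file]
    set pg [$mp get -create 0]                    ;# pin page 0
    error_check_good pg_init [$pg init "hello"] 0 ;# overwrite contents
    error_check_good pg_put [$pg put -dirty] 0    ;# unpin, mark dirty
    error_check_good mp_close [$mp close] 0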
diff --git a/storage/bdb/test/memp002.tcl b/storage/bdb/test/memp002.tcl
deleted file mode 100644
index 763ef923d35..00000000000
--- a/storage/bdb/test/memp002.tcl
+++ /dev/null
@@ -1,62 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: memp002.tcl,v 11.49 2004/01/28 03:36:28 bostic Exp $
-#
-
-# TEST memp002
-# TEST Tests multiple processes accessing and modifying the same files.
-proc memp002 { } {
- #
-	# Private memory does not support multiple processes, so don't
-	# run memp002_body with -private.
- #
- memp002_body ""
- memp002_body "-system_mem -shm_key 1"
-}
-
-proc memp002_body { flags } {
- source ./include.tcl
-
- puts "Memp002: {$flags} Multiprocess mpool tester"
-
- set procs 4
- set psizes "512 1024 2048 4096 8192"
- set iterations 500
- set npages 100
-
- # Check if this combination of flags is supported by this arch.
- if { [mem_chk $flags] == 1 } {
- return
- }
-
- set iter [expr $iterations / $procs]
-
- # Clean up old stuff and create new.
- env_cleanup $testdir
-
- for { set i 0 } { $i < [llength $psizes] } { incr i } {
- fileremove -f $testdir/file$i
- }
- set e [eval {berkdb_env -create -lock -home $testdir} $flags]
- error_check_good dbenv [is_valid_env $e] TRUE
-
- set pidlist {}
- for { set i 0 } { $i < $procs } {incr i} {
-
- puts "$tclsh_path\
- $test_path/mpoolscript.tcl $testdir $i $procs \
- $iter $psizes $npages 3 $flags > \
- $testdir/memp002.$i.out &"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \
- $iter $psizes $npages 3 $flags &]
- lappend pidlist $p
- }
- puts "Memp002: $procs independent processes now running"
- watch_procs $pidlist
-
- reset_env $e
-}
diff --git a/storage/bdb/test/memp003.tcl b/storage/bdb/test/memp003.tcl
deleted file mode 100644
index a4e68bfd58d..00000000000
--- a/storage/bdb/test/memp003.tcl
+++ /dev/null
@@ -1,154 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: memp003.tcl,v 11.51 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST memp003
-# TEST Test reader-only/writer process combinations; we use the access methods
-# TEST for testing.
-proc memp003 { } {
- global rand_init
- error_check_good set_random_seed [berkdb srand $rand_init] 0
- #
-	# Private memory does not support multiple processes, so don't
-	# run memp003_body with -private.
- #
- memp003_body ""
- memp003_body "-system_mem -shm_key 1"
-}
-
-proc memp003_body { flags } {
- global alphabet
- source ./include.tcl
-
- puts "Memp003: {$flags} Reader/Writer tests"
-
- if { [mem_chk $flags] == 1 } {
- return
- }
-
- env_cleanup $testdir
- set psize 1024
- set nentries 500
- set testfile mpool.db
- set t1 $testdir/t1
-
- # Create an environment that the two processes can share, with
- # 20 pages per cache.
- set c [list 0 [expr $psize * 20 * 3] 3]
- set dbenv [eval {berkdb_env \
- -create -lock -home $testdir -cachesize $c} $flags]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- # First open and create the file.
- set db [berkdb_open -env $dbenv -create \
- -mode 0644 -pagesize $psize -btree $testfile]
- error_check_good dbopen/RW [is_valid_db $db] TRUE
-
- set did [open $dict]
- set txn ""
- set count 0
-
- puts "\tMemp003.a: create database"
- set keys ""
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- lappend keys $str
-
- set ret [eval {$db put} $txn {$str $str}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $txn {$str}]
- error_check_good get $ret [list [list $str $str]]
-
- incr count
- }
- close $did
- error_check_good close [$db close] 0
-
- # Now open the file for read-only
- set db [berkdb_open -env $dbenv -rdonly $testfile]
- error_check_good dbopen/RO [is_substr $db db] 1
-
- puts "\tMemp003.b: verify a few keys"
- # Read and verify a couple of keys; saving them to check later
- set testset ""
- for { set i 0 } { $i < 10 } { incr i } {
- set ndx [berkdb random_int 0 [expr $nentries - 1]]
- set key [lindex $keys $ndx]
- if { [lsearch $testset $key] != -1 } {
- incr i -1
- continue;
- }
-
- # The remote process stuff is unhappy with
- # zero-length keys; make sure we don't pick one.
- if { [llength $key] == 0 } {
- incr i -1
- continue
- }
-
- lappend testset $key
-
- set ret [eval {$db get} $txn {$key}]
- error_check_good get/RO $ret [list [list $key $key]]
- }
-
- puts "\tMemp003.c: retrieve and modify keys in remote process"
- # Now open remote process where we will open the file RW
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
- puts $f1 "flush stdout"
- flush $f1
-
- set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ]
- set remote_env [send_cmd $f1 \
- "berkdb_env -create -lock -home $testdir -cachesize $c $flags"]
- error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
-
- set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
- error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
-
- foreach k $testset {
- # Get the key
- set ret [send_cmd $f1 "$remote_db get $k"]
- error_check_good remote_get $ret [list [list $k $k]]
-
- # Now replace the key
- set ret [send_cmd $f1 "$remote_db put $k $k$k"]
- error_check_good remote_put $ret 0
- }
-
- puts "\tMemp003.d: verify changes in local process"
- foreach k $testset {
-		set ret [eval {$db get} $txn {$k}]
-		error_check_good get_verify/RO $ret [list [list $k $k$k]]
- }
-
- puts "\tMemp003.e: Fill up the cache with dirty buffers"
- foreach k $testset {
- # Now rewrite the keys with BIG data
- set data [replicate $alphabet 32]
- set ret [send_cmd $f1 "$remote_db put $k $data"]
- error_check_good remote_put $ret 0
- }
-
- puts "\tMemp003.f: Get more pages for the read-only file"
- dump_file $db $txn $t1 nop
-
- puts "\tMemp003.g: Sync from the read-only file"
- error_check_good db_sync [$db sync] 0
- error_check_good db_close [$db close] 0
-
- set ret [send_cmd $f1 "$remote_db close"]
- error_check_good remote_get $ret 0
-
- # Close the environment both remotely and locally.
- set ret [send_cmd $f1 "$remote_env close"]
- error_check_good remote:env_close $ret 0
- close $f1
-
- reset_env $dbenv
-}
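[Editor's sketch, not part of the patch.] Memp003.c drives a second tclsh through the suite's send_cmd helper; the skeleton of that two-process pattern, assuming $tclsh_path, $test_path, and $testdir from include.tcl:

    set f1 [open |$tclsh_path r+]
    puts $f1 "source $test_path/test.tcl"
    flush $f1
    set renv [send_cmd $f1 "berkdb_env -create -lock -home $testdir"]
    # ... drive $renv with further send_cmd calls ...
    error_check_good remote_close [send_cmd $f1 "$renv close"] 0
    close $f1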
diff --git a/storage/bdb/test/mpoolscript.tcl b/storage/bdb/test/mpoolscript.tcl
deleted file mode 100644
index 38d689c9666..00000000000
--- a/storage/bdb/test/mpoolscript.tcl
+++ /dev/null
@@ -1,171 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mpoolscript.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $
-#
-# Random multiple process mpool tester.
-# Usage: mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags
-# dir: lock directory.
-# id: Unique identifier for this process.
-# maxprocs: Number of procs in this test.
-# numiters: Total number of iterations.
-# pgsizes: Pagesizes for the different files. Length of this item indicates
-# how many files to use.
-# numpages: Number of pages per file.
-# sleepint: Maximum sleep interval.
-# flags: Flags for env open
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-set usage \
- "mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags"
-
-# Verify usage
-if { $argc != 8 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- puts $argc
- exit
-}
-
-# Initialize arguments
-set dir [lindex $argv 0]
-set id [lindex $argv 1]
-set maxprocs [lindex $argv 2]
-set numiters [ lindex $argv 3 ]
-set pgsizes [ lindex $argv 4 ]
-set numpages [ lindex $argv 5 ]
-set sleepint [ lindex $argv 6 ]
-set flags [ lindex $argv 7]
-
-# Initialize seed
-global rand_init
-berkdb srand $rand_init
-
-# Give time for all processes to start up.
-tclsleep 10
-
-puts -nonewline "Beginning execution for $id: $maxprocs $dir $numiters"
-puts " $pgsizes $numpages $sleepint"
-flush stdout
-
-# Figure out how small/large to make the cache
-set max 0
-foreach i $pgsizes {
- if { $i > $max } {
- set max $i
- }
-}
-
-set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
-set env_cmd {berkdb_env -lock -cachesize $cache -home $dir}
-set e [eval $env_cmd $flags]
-error_check_good env_open [is_valid_env $e] TRUE
-
-# Now open files
-set mpools {}
-set nfiles 0
-foreach psize $pgsizes {
- set mp [$e mpool -create -mode 0644 -pagesize $psize file$nfiles]
- error_check_good memp_fopen:$nfiles [is_valid_mpool $mp $e] TRUE
- lappend mpools $mp
- incr nfiles
-}
-
-puts "Establishing long-term pin on file 0 page $id for process $id"
-
-# Set up the long-pin page
-set locker [$e lock_id]
-set lock [$e lock_get write $locker 0:$id]
-error_check_good lock_get [is_valid_lock $lock $e] TRUE
-
-set mp [lindex $mpools 0]
-set master_page [$mp get -create $id]
-error_check_good mp_get:$master_page [is_valid_page $master_page $mp] TRUE
-
-set r [$master_page init MASTER$id]
-error_check_good page_init $r 0
-
-# Release the lock but keep the page pinned
-set r [$lock put]
-error_check_good lock_put $r 0
-
-# Main loop. On each iteration, we'll check every page in each
-# of the files. On any file, if we see the appropriate tag in the
-# field, we'll rewrite the page, else we won't. Keep track of
-# how many pages we actually process.
-set pages 0
-for { set iter 0 } { $iter < $numiters } { incr iter } {
- puts "[timestamp]: iteration $iter, $pages pages set so far"
- flush stdout
- for { set fnum 1 } { $fnum < $nfiles } { incr fnum } {
- if { [expr $fnum % 2 ] == 0 } {
- set pred [expr ($id + $maxprocs - 1) % $maxprocs]
- } else {
- set pred [expr ($id + $maxprocs + 1) % $maxprocs]
- }
-
- set mpf [lindex $mpools $fnum]
- for { set p 0 } { $p < $numpages } { incr p } {
- set lock [$e lock_get write $locker $fnum:$p]
- error_check_good lock_get:$fnum:$p \
- [is_valid_lock $lock $e] TRUE
-
- # Now, get the page
- set pp [$mpf get -create $p]
- error_check_good page_get:$fnum:$p \
- [is_valid_page $pp $mpf] TRUE
-
- if { [$pp is_setto $pred] == 0 || [$pp is_setto 0] == 0 } {
- # Set page to self.
- set r [$pp init $id]
- error_check_good page_init:$fnum:$p $r 0
- incr pages
- set r [$pp put -dirty]
- error_check_good page_put:$fnum:$p $r 0
- } else {
- error_check_good page_put:$fnum:$p [$pp put] 0
- }
- error_check_good lock_put:$fnum:$p [$lock put] 0
- }
- }
- tclsleep [berkdb random_int 1 $sleepint]
-}
-
-# Now verify your master page, release its pin, then verify everyone else's
-puts "$id: End of run verification of master page"
-set r [$master_page is_setto MASTER$id]
-error_check_good page_check $r 1
-set r [$master_page put -dirty]
-error_check_good page_put $r 0
-
-set i [expr ($id + 1) % $maxprocs]
-set mpf [lindex $mpools 0]
-
-while { $i != $id } {
- set p [$mpf get -create $i]
- error_check_good mp_get [is_valid_page $p $mpf] TRUE
-
- if { [$p is_setto MASTER$i] != 1 } {
- puts "Warning: Master page $i not set."
- }
- error_check_good page_put:$p [$p put] 0
-
- set i [expr ($i + 1) % $maxprocs]
-}
-
-# Close files
-foreach i $mpools {
- set r [$i close]
- error_check_good mpf_close $r 0
-}
-
-# Close environment system
-set r [$e close]
-error_check_good env_close $r 0
-
-puts "[timestamp] $id Complete"
-flush stdout
diff --git a/storage/bdb/test/mutex001.tcl b/storage/bdb/test/mutex001.tcl
deleted file mode 100644
index 66d14c41c95..00000000000
--- a/storage/bdb/test/mutex001.tcl
+++ /dev/null
@@ -1,51 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mutex001.tcl,v 11.25 2004/01/28 03:36:28 bostic Exp $
-#
-
-# TEST mutex001
-# TEST Test basic mutex functionality
-proc mutex001 { } {
- source ./include.tcl
-
- puts "Mutex001: Basic functionality"
- env_cleanup $testdir
- set nlocks 20
-
- # Test open w/out create; should fail
- error_check_bad \
- env_open [catch {berkdb_env -lock -home $testdir} env] 0
-
- puts "\tMutex001.a: Create lock env"
- # Now open for real
- set env [berkdb_env -create -mode 0644 -lock -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- puts "\tMutex001.b: Create $nlocks mutexes"
- set m [$env mutex 0644 $nlocks]
- error_check_good mutex_init [is_valid_mutex $m $env] TRUE
-
- # Get, set each mutex; sleep, then get Release
- puts "\tMutex001.c: Get/set loop"
- for { set i 0 } { $i < $nlocks } { incr i } {
- set r [$m get $i ]
- error_check_good mutex_get $r 0
-
- set r [$m setval $i $i]
- error_check_good mutex_setval $r 0
- }
- tclsleep 5
- for { set i 0 } { $i < $nlocks } { incr i } {
- set r [$m getval $i]
- error_check_good mutex_getval $r $i
-
- set r [$m release $i ]
-		error_check_good mutex_release $r 0
- }
-
- error_check_good mutex_close [$m close] 0
- error_check_good env_close [$env close] 0
-}
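[Editor's sketch, not part of the patch.] Stripped of looping and timing, the mutex life cycle Mutex001 exercises looks like this, assuming an open lock environment handle $env:

    set m [$env mutex 0644 1]                    ;# allocate one mutex
    error_check_good m_get [$m get 0] 0          ;# lock it
    error_check_good m_setval [$m setval 0 42] 0 ;# store a value
    error_check_good m_getval [$m getval 0] 42   ;# read it back
    error_check_good m_release [$m release 0] 0  ;# unlock
    error_check_good m_close [$m close] 0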
diff --git a/storage/bdb/test/mutex002.tcl b/storage/bdb/test/mutex002.tcl
deleted file mode 100644
index f03a63ac139..00000000000
--- a/storage/bdb/test/mutex002.tcl
+++ /dev/null
@@ -1,94 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mutex002.tcl,v 11.25 2004/01/28 03:36:28 bostic Exp $
-#
-
-# TEST mutex002
-# TEST Test basic mutex synchronization
-proc mutex002 { } {
- source ./include.tcl
-
- puts "Mutex002: Basic synchronization"
- env_cleanup $testdir
- set nlocks 20
-
- # Fork off child before we open any files.
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
- flush $f1
-
- # Open the environment and the mutex locally
- puts "\tMutex002.a: Open local and remote env"
- set local_env [berkdb_env -create -mode 0644 -lock -home $testdir]
- error_check_good env_open [is_valid_env $local_env] TRUE
-
- set local_mutex [$local_env mutex 0644 $nlocks]
- error_check_good \
- mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
-
- # Open the environment and the mutex remotely
- set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"]
- error_check_good remote:env_open [is_valid_env $remote_env] TRUE
-
- set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
- error_check_good \
- mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
-
- # Do a get here, then set the value to be pid.
- # On the remote side fire off a get and getval.
- puts "\tMutex002.b: Local and remote get/set"
- set r [$local_mutex get 1]
- error_check_good lock_get $r 0
-
- set r [$local_mutex setval 1 [pid]]
-	error_check_good mutex_setval $r 0
-
- # Now have the remote side request the lock and check its
- # value. Then wait 5 seconds, release the mutex and see
- # what the remote side returned.
- send_timed_cmd $f1 1 "$remote_mutex get 1"
- send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
-
- # Now sleep before resetting and releasing lock
- tclsleep 5
- set newv [expr [pid] - 1]
- set r [$local_mutex setval 1 $newv]
- error_check_good mutex_setval $r 0
-
- set r [$local_mutex release 1]
- error_check_good mutex_release $r 0
-
- # Now get the result from the other script
- # Timestamp
- set result [rcv_result $f1]
- error_check_good lock_get:remote_time [expr $result > 4] 1
-
- # Timestamp
- set result [rcv_result $f1]
-
- # Mutex value
- set result [send_cmd $f1 "puts \$ret"]
- error_check_good lock_get:remote_getval $result $newv
-
- # Close down the remote
- puts "\tMutex002.c: Close remote"
- set ret [send_cmd $f1 "$remote_mutex close" 5]
- # Not sure why we need this, but we do... an extra blank line
-	# line of output appears from somewhere.
- gets $f1 ret
- error_check_good remote:mutex_close $ret 0
-
- set ret [send_cmd $f1 "$remote_env close"]
- error_check_good remote:env_close $ret 0
-
- catch { close $f1 } result
-
- set ret [$local_mutex close]
- error_check_good local:mutex_close $ret 0
-
- set ret [$local_env close]
- error_check_good local:env_close $ret 0
-}
diff --git a/storage/bdb/test/mutex003.tcl b/storage/bdb/test/mutex003.tcl
deleted file mode 100644
index 7efd0883e0e..00000000000
--- a/storage/bdb/test/mutex003.tcl
+++ /dev/null
@@ -1,52 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mutex003.tcl,v 11.26 2004/01/28 03:36:28 bostic Exp $
-#
-
-# TEST mutex003
-# TEST Generate a bunch of parallel testers that try to randomly obtain locks.
-proc mutex003 { } {
- source ./include.tcl
-
- set nmutex 20
- set iter 500
- set procs 5
- set mdegree 3
- set wait 2
- puts "Mutex003: Multi-process random mutex test"
-
- env_cleanup $testdir
-
- puts "\tMutex003.a: Create environment"
- # Now open the region we'll use for multiprocess testing.
- set env [berkdb_env -create -mode 0644 -lock -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set mutex [$env mutex 0644 $nmutex]
- error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
-
- error_check_good mutex_close [$mutex close] 0
-
- # Now spawn off processes
- puts "\tMutex003.b: Create $procs processes"
- set pidlist {}
- for { set i 0 } {$i < $procs} {incr i} {
- puts "$tclsh_path\
- $test_path/mutexscript.tcl $testdir\
- $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- mutexscript.tcl $testdir/$i.mutexout $testdir\
- $iter $nmutex $wait $mdegree &]
- lappend pidlist $p
- }
- puts "\tMutex003.c: $procs independent processes now running"
- watch_procs $pidlist
- error_check_good env_close [$env close] 0
- # Remove output files
- for { set i 0 } {$i < $procs} {incr i} {
- fileremove -f $testdir/$i.mutexout
- }
-}
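mutex003's fan-out is the standard wrap.tcl/watch_procs pattern: spawn detached workers, then poll until all of them have finished. A reduced sketch using sentinel files instead of pid-watching (worker.tcl and the file names are placeholders, not harness names):

    set procs 3
    for {set i 0} {$i < $procs} {incr i} {
        # Each worker is expected to create done.$i when it exits.
        exec tclsh worker.tcl $i > $i.mutexout &
    }
    while {[llength [glob -nocomplain done.*]] < $procs} {
        after 1000      ;# poll, as watch_procs does with its pid list
    }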
diff --git a/storage/bdb/test/mutexscript.tcl b/storage/bdb/test/mutexscript.tcl
deleted file mode 100644
index 7bf1bbe9552..00000000000
--- a/storage/bdb/test/mutexscript.tcl
+++ /dev/null
@@ -1,91 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mutexscript.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $
-#
-# Random mutex tester.
-# Usage: mutexscript dir numiters nmutex sleepint degree
-# dir: dir in which all the mutexes live.
-# numiters: Total number of iterations.
-# nmutex: Total number of mutexes.
-# sleepint: Maximum sleep interval.
-# degree: Maximum number of locks to acquire at once
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-set usage "mutexscript dir numiters nmutex sleepint degree"
-
-# Verify usage
-if { $argc != 5 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set dir [lindex $argv 0]
-set numiters [ lindex $argv 1 ]
-set nmutex [ lindex $argv 2 ]
-set sleepint [ lindex $argv 3 ]
-set degree [ lindex $argv 4 ]
-set locker [pid]
-set mypid [sanitized_pid]
-
-# Initialize seed
-global rand_init
-berkdb srand $rand_init
-
-puts -nonewline "Mutexscript: Beginning execution for $locker:"
-puts " $numiters $nmutex $sleepint $degree"
-flush stdout
-
-# Open the environment and the mutex
-set e [berkdb_env -create -mode 0644 -lock -home $dir]
-error_check_good env_open [is_valid_env $e] TRUE
-
-set mutex [$e mutex 0644 $nmutex]
-error_check_good mutex_init [is_valid_mutex $mutex $e] TRUE
-
-# Sleep for a while to make sure that everyone has gotten in
-tclsleep 5
-
-for { set iter 0 } { $iter < $numiters } { incr iter } {
- set nlocks [berkdb random_int 1 $degree]
- # We will always lock objects in ascending order to avoid
- # deadlocks.
- set lastobj 1
- set mlist {}
- for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
- # Pick lock parameters
- set obj [berkdb random_int $lastobj [expr $nmutex - 1]]
- set lastobj [expr $obj + 1]
- puts "[timestamp] $locker $lnum: $obj"
-
- # Do get, set its val to own pid, and then add to list
- error_check_good mutex_get:$obj [$mutex get $obj] 0
- error_check_good mutex_setval:$obj [$mutex setval $obj $mypid] 0
- lappend mlist $obj
- if {$lastobj >= $nmutex} {
- break
- }
- }
-
- # Sleep for 10 to (100*$sleepint) ms.
- after [berkdb random_int 10 [expr $sleepint * 100]]
-
- # Now release locks
- foreach i $mlist {
- error_check_good mutex_getval:$i [$mutex getval $i] $mypid
- error_check_good mutex_setval:$i \
- [$mutex setval $i [expr 0 - $mypid]] 0
- error_check_good mutex_release:$i [$mutex release $i] 0
- }
- puts "[timestamp] $locker released mutexes"
- flush stdout
-}
-
-puts "[timestamp] $locker Complete"
-flush stdout
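The script's deadlock avoidance is the classic resource-ordering rule: every process acquires its mutexes in ascending id order, so no cycle of waiters can form. The core of that idiom, assuming a $mutex handle opened as in the script above and the harness's error_check_good:

    set wanted [lsort -integer {7 2 9}]
    foreach obj $wanted {
        error_check_good mutex_get:$obj [$mutex get $obj] 0
    }
    # Release order doesn't matter once all locks are held.
    foreach obj $wanted {
        error_check_good mutex_release:$obj [$mutex release $obj] 0
    }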
diff --git a/storage/bdb/test/ndbm.tcl b/storage/bdb/test/ndbm.tcl
deleted file mode 100644
index 00ee1f4fdbf..00000000000
--- a/storage/bdb/test/ndbm.tcl
+++ /dev/null
@@ -1,144 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: ndbm.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $
-#
-# Historic NDBM interface test.
-# Use the first 1000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Then reopen the file, re-retrieve everything.
-# Finally, delete everything.
-proc ndbm { { nentries 1000 } } {
- source ./include.tcl
-
- puts "NDBM interfaces test: $nentries"
-
- # Create the database and open the dictionary
- set testfile $testdir/ndbmtest
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir NULL
-
- set db [berkdb ndbm_open -create -truncate -mode 0644 $testfile]
- error_check_good ndbm_open [is_substr $db ndbm] 1
- set did [open $dict]
-
- error_check_good rdonly_false [$db rdonly] 0
-
- set flags 0
- set txn 0
- set count 0
- set skippednullkey 0
-
- puts "\tNDBM.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- # NDBM can't handle zero-length keys
- if { [string length $str] == 0 } {
- set skippednullkey 1
- continue
- }
-
- set ret [$db store $str $str insert]
- error_check_good ndbm_store $ret 0
-
- set d [$db fetch $str]
- error_check_good ndbm_fetch $d $str
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tNDBM.b: dump file"
- set oid [open $t1 w]
- for { set key [$db firstkey] } { $key != -1 } {
- set key [$db nextkey] } {
- puts $oid $key
- set d [$db fetch $key]
- error_check_good ndbm_refetch $d $key
- }
-
- # If we had to skip a zero-length key, juggle things to cover up
- # this fact in the dump.
- if { $skippednullkey == 1 } {
- puts $oid ""
- incr nentries 1
- }
- close $oid
-
- # Now compare the keys to see if they match the dictionary (or ints)
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
-
- error_check_good NDBM:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # File descriptor tests won't work under Windows.
- if { $is_windows_test != 1 } {
- puts "\tNDBM.c: pagf/dirf test"
- set fd [$db pagfno]
- error_check_bad pagf $fd -1
- set fd [$db dirfno]
- error_check_bad dirf $fd -1
- }
-
- puts "\tNDBM.d: close, open, and dump file"
-
- # Now, reopen the file and run the last test again.
- error_check_good ndbm_close [$db close] 0
- set db [berkdb ndbm_open -rdonly $testfile]
- error_check_good ndbm_open2 [is_substr $db ndbm] 1
- set oid [open $t1 w]
-
- error_check_good rdonly_true [$db rdonly] "rdonly:not owner"
-
- for { set key [$db firstkey] } { $key != -1 } {
- set key [$db nextkey] } {
- puts $oid $key
- set d [$db fetch $key]
- error_check_good ndbm_refetch2 $d $key
- }
- if { $skippednullkey == 1 } {
- puts $oid ""
- }
- close $oid
-
- # Now compare the keys to see if they match the dictionary (or ints)
- filesort $t1 $t3
-
- error_check_good NDBM:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and delete each entry
- puts "\tNDBM.e: sequential scan and delete"
-
- error_check_good ndbm_close [$db close] 0
- set db [berkdb ndbm_open $testfile]
- error_check_good ndbm_open3 [is_substr $db ndbm] 1
- set oid [open $t1 w]
-
- for { set key [$db firstkey] } { $key != -1 } {
- set key [$db nextkey] } {
- puts $oid $key
- set ret [$db delete $key]
- error_check_good ndbm_delete $ret 0
- }
- if { $skippednullkey == 1 } {
- puts $oid ""
- }
- close $oid
-
- # Now compare the keys to see if they match the dictionary (or ints)
- filesort $t1 $t3
-
- error_check_good NDBM:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
- error_check_good ndbm_close [$db close] 0
-}
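The NDBM surface this test exercises is small: store/fetch plus a firstkey/nextkey scan that terminates at -1. A compact sketch (assumes the berkdb Tcl extension is loaded; the database path is illustrative):

    set db [berkdb ndbm_open -create -mode 0644 /tmp/ndbmdemo]
    $db store hello hello insert
    for {set k [$db firstkey]} {$k != -1} {set k [$db nextkey]} {
        puts "$k -> [$db fetch $k]"
    }
    $db close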
diff --git a/storage/bdb/test/parallel.tcl b/storage/bdb/test/parallel.tcl
deleted file mode 100644
index bd1f468fa5d..00000000000
--- a/storage/bdb/test/parallel.tcl
+++ /dev/null
@@ -1,383 +0,0 @@
-# Code to load up the tests into the queue database
-# $Id: parallel.tcl,v 11.46 2004/09/22 18:01:05 bostic Exp $
-proc load_queue { file {dbdir RUNQUEUE} nitems } {
- global serial_tests
- global num_serial
- global num_parallel
-
- puts -nonewline "Loading run queue with $nitems items..."
- flush stdout
-
- set env [berkdb_env -create -lock -home $dbdir]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- # Open two databases, one for tests that may be run
- # in parallel, the other for tests we want to run
- # while only a single process is testing.
- set db [eval {berkdb_open -env $env -create \
- -mode 0644 -len 200 -queue queue.db} ]
- error_check_good dbopen [is_valid_db $db] TRUE
- set serialdb [eval {berkdb_open -env $env -create \
- -mode 0644 -len 200 -queue serialqueue.db} ]
- error_check_good dbopen [is_valid_db $serialdb] TRUE
-
- set fid [open $file]
-
- set count 0
-
- while { [gets $fid str] != -1 } {
- set testarr($count) $str
- incr count
- }
-
- # Randomize array of tests.
- set rseed [pid]
- berkdb srand $rseed
- puts -nonewline "randomizing..."
- flush stdout
- for { set i 0 } { $i < $count } { incr i } {
- set tmp $testarr($i)
-
- # RPC test is very long so force it to run first
- # in full runs. If we find 'r rpc' as we walk the
- # array, arrange to put it in slot 0 ...
- if { [is_substr $tmp "r rpc"] == 1 && \
- [string match $nitems ALL] } {
- set j 0
- } else {
- set j [berkdb random_int $i [expr $count - 1]]
- }
- # ... and if 'r rpc' is selected to be swapped with the
- # current item in the array, skip the swap. If we
- # did the swap and moved to the next item, "r rpc" would
- # never get moved to slot 0.
- if { [is_substr $testarr($j) "r rpc"] && \
- [string match $nitems ALL] } {
- continue
- }
-
- set testarr($i) $testarr($j)
- set testarr($j) $tmp
- }
-
- if { [string compare ALL $nitems] != 0 } {
- set maxload $nitems
- } else {
- set maxload $count
- }
-
- puts "loading..."
- flush stdout
- set num_serial 0
- set num_parallel 0
- for { set i 0 } { $i < $maxload } { incr i } {
- set str $testarr($i)
- # Push serial tests into serial testing db, others
- # into parallel db.
- if { [is_serial $str] } {
- set ret [eval {$serialdb put -append $str}]
- error_check_good put:serialdb [expr $ret > 0] 1
- incr num_serial
- } else {
- set ret [eval {$db put -append $str}]
- error_check_good put:paralleldb [expr $ret > 0] 1
- incr num_parallel
- }
- }
-
- error_check_good maxload $maxload [expr $num_serial + $num_parallel]
- puts "Loaded $maxload records: $num_serial in serial,\
- $num_parallel in parallel."
- close $fid
- $db close
- $serialdb close
- $env close
-}
-
-proc init_runqueue { {dbdir RUNQUEUE} nitems list} {
-
- if { [file exists $dbdir] != 1 } {
- file mkdir $dbdir
- }
- puts "Creating test list..."
- $list ALL -n
- load_queue ALL.OUT $dbdir $nitems
- file delete TEST.LIST
- file rename ALL.OUT TEST.LIST
-}
-
-proc run_parallel { nprocs {list run_all} {nitems ALL} } {
- global num_serial
- global num_parallel
-
- # Forcibly remove stuff from prior runs, if it's still there.
- fileremove -f ./RUNQUEUE
- set dirs [glob -nocomplain ./PARALLEL_TESTDIR.*]
- set files [glob -nocomplain ALL.OUT.*]
- foreach file $files {
- fileremove -f $file
- }
- foreach dir $dirs {
- fileremove -f $dir
- }
-
- set basename ./PARALLEL_TESTDIR
- set queuedir ./RUNQUEUE
- source ./include.tcl
-
- mkparalleldirs $nprocs $basename $queuedir
-
- init_runqueue $queuedir $nitems $list
-
- set basedir [pwd]
- set queuedir ../../[string range $basedir \
- [string last "/" $basedir] end]/$queuedir
-
- # Run serial tests in parallel testdir 0.
- run_queue 0 $basename.0 $queuedir serial $num_serial
-
- set pidlist {}
- # Run parallel tests in testdirs 1 through n.
- for { set i 1 } { $i <= $nprocs } { incr i } {
- set ret [catch {
- set p [exec $tclsh_path << \
- "source $test_path/test.tcl; run_queue $i \
- $basename.$i $queuedir parallel $num_parallel" &]
- lappend pidlist $p
- set f [open $testdir/begin.$p w]
- close $f
- } res]
- }
- watch_procs $pidlist 300 360000
-
- set failed 0
- for { set i 0 } { $i <= $nprocs } { incr i } {
- if { [file exists ALL.OUT.$i] == 1 } {
- puts -nonewline "Checking output from ALL.OUT.$i ... "
- if { [check_output ALL.OUT.$i] == 1 } {
- set failed 1
- }
- puts " done."
- }
- }
- if { $failed == 0 } {
- puts "Regression tests succeeded."
- } else {
- puts "Regression tests failed."
- puts "Review UNEXPECTED OUTPUT lines above for errors."
- puts "Complete logs found in ALL.OUT.x files"
- }
-}
-
-proc run_queue { i rundir queuedir {qtype parallel} {nitems 0} } {
- set builddir [pwd]
- file delete $builddir/ALL.OUT.$i
- cd $rundir
-
- puts "Starting $qtype run_queue process $i (pid [pid])."
-
- source ./include.tcl
- global env
-
- set dbenv [berkdb_env -create -lock -home $queuedir]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- if { $qtype == "parallel" } {
- set db [eval {berkdb_open -env $dbenv \
- -mode 0644 -queue queue.db} ]
- error_check_good dbopen [is_valid_db $db] TRUE
- } elseif { $qtype == "serial" } {
- set db [eval {berkdb_open -env $dbenv \
- -mode 0644 -queue serialqueue.db} ]
- error_check_good serialdbopen [is_valid_db $db] TRUE
- } else {
- puts "FAIL: queue type $qtype not recognized"
- }
-
- set dbc [eval $db cursor]
- error_check_good cursor [is_valid_cursor $dbc $db] TRUE
-
- set count 0
- set waitcnt 0
- set starttime [timestamp -r]
-
- while { $waitcnt < 5 } {
- set line [$db get -consume]
- if { [ llength $line ] > 0 } {
- set cmd [lindex [lindex $line 0] 1]
- set num [lindex [lindex $line 0] 0]
- set o [open $builddir/ALL.OUT.$i a]
- puts $o "\nExecuting record $num ([timestamp -w]):\n"
- set tdir "TESTDIR.$i"
- regsub {TESTDIR} $cmd $tdir cmd
- puts $o $cmd
- close $o
- if { [expr {$num % 10} == 0] && $nitems != 0 } {
- puts -nonewline \
- "Starting test $num of $nitems $qtype items. "
- set now [timestamp -r]
- set elapsed [expr $now - $starttime]
- set esttotal [expr $nitems * $elapsed / $num]
- set remaining [expr $esttotal - $elapsed]
- if { $remaining < 3600 } {
- puts "\tRough guess: less than 1\
- hour left."
- } else {
- puts "\tRough guess: \
- [expr $remaining / 3600] hour(s) left."
- }
- }
-# puts "Process $i, record $num:\n$cmd"
- set env(PURIFYOPTIONS) \
- "-log-file=./test$num.%p -follow-child-processes -messages=first"
- set env(PURECOVOPTIONS) \
- "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; $cmd" \
- >>& $builddir/ALL.OUT.$i } res] {
- set o [open $builddir/ALL.OUT.$i a]
- puts $o "FAIL: '$cmd': $res"
- close $o
- }
- env_cleanup $testdir
- set o [open $builddir/ALL.OUT.$i a]
- puts $o "\nEnding record $num ([timestamp])\n"
- close $o
- incr count
- } else {
- incr waitcnt
- tclsleep 1
- }
- }
-
- set now [timestamp -r]
- set elapsed [expr $now - $starttime]
- puts "Process $i: $count commands executed in [format %02u:%02u \
- [expr $elapsed / 3600] [expr ($elapsed % 3600) / 60]]"
-
- error_check_good close_parallel_cursor_$i [$dbc close] 0
- error_check_good close_parallel_db_$i [$db close] 0
- error_check_good close_parallel_env_$i [$dbenv close] 0
-
- #
- # We need to put the pid file in the builddir's idea
- # of testdir, not this child process' local testdir.
- # Therefore source builddir's include.tcl to get its
- # testdir.
- # !!! This resets testdir, so don't do anything else
- # local to the child after this.
- source $builddir/include.tcl
-
- set f [open $builddir/$testdir/end.[pid] w]
- close $f
- cd $builddir
-}
-
-proc mkparalleldirs { nprocs basename queuedir } {
- source ./include.tcl
- set dir [pwd]
-
- if { $is_windows_test != 1 } {
- set EXE ""
- } else {
- set EXE ".exe"
- }
- for { set i 0 } { $i <= $nprocs } { incr i } {
- set destdir $basename.$i
- catch {file mkdir $destdir}
- puts "Created $destdir"
- if { $is_windows_test == 1 } {
- catch {file mkdir $destdir/Debug}
- catch {eval file copy \
- [eval glob {$dir/Debug/*.dll}] $destdir/Debug}
- }
- catch {eval file copy \
- [eval glob {$dir/{.libs,include.tcl}}] $destdir}
- # catch {eval file copy $dir/$queuedir $destdir}
- catch {eval file copy \
- [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
- {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
- {$dir/db_{archive,verify}$EXE}] \
- $destdir}
-
- # Create modified copies of include.tcl in parallel
- # directories so paths still work.
-
- set infile [open ./include.tcl r]
- set d [read $infile]
- close $infile
-
- regsub {test_path } $d {test_path ../} d
- regsub {src_root } $d {src_root ../} d
- set tdir "TESTDIR.$i"
- regsub -all {TESTDIR} $d $tdir d
- regsub {KILL \.} $d {KILL ..} d
- set outfile [open $destdir/include.tcl w]
- puts $outfile $d
- close $outfile
-
- global svc_list
- foreach svc_exe $svc_list {
- if { [file exists $dir/$svc_exe] } {
- catch {eval file copy $dir/$svc_exe $destdir}
- }
- }
- }
-}
-
-proc run_ptest { nprocs test args } {
- global parms
- set basename ./PARALLEL_TESTDIR
- set queuedir NULL
- source ./include.tcl
-
- mkparalleldirs $nprocs $basename $queuedir
-
- if { [info exists parms($test)] } {
- foreach method \
- "hash queue queueext recno rbtree frecno rrecno btree" {
- if { [eval exec_ptest $nprocs $basename \
- $test $method $args] != 0 } {
- break
- }
- }
- } else {
- eval exec_ptest $nprocs $basename $test $args
- }
-}
-
-proc exec_ptest { nprocs basename test args } {
- source ./include.tcl
-
- set basedir [pwd]
- set pidlist {}
- puts "Running $nprocs parallel runs of $test"
- for { set i 1 } { $i <= $nprocs } { incr i } {
- set outf ALL.OUT.$i
- fileremove -f $outf
- set ret [catch {
- set p [exec $tclsh_path << \
- "cd $basename.$i;\
- source ../$test_path/test.tcl;\
- $test $args" >& $outf &]
- lappend pidlist $p
- set f [open $testdir/begin.$p w]
- close $f
- } res]
- }
- watch_procs $pidlist 30 36000
- set failed 0
- for { set i 1 } { $i <= $nprocs } { incr i } {
- if { [check_output ALL.OUT.$i] == 1 } {
- set failed 1
- puts "Test $test failed in process $i."
- }
- }
- if { $failed == 0 } {
- puts "Test $test succeeded all processes"
- return 0
- } else {
- puts "Test failed: stopping"
- return 1
- }
-}
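The heart of run_queue is the queue access method's consume operation: get -consume atomically dequeues the head record, so any number of workers can drain one queue without further coordination. Reduced to its skeleton (assumes $db is a -queue database as created in load_queue above):

    set waitcnt 0
    while {$waitcnt < 5} {
        set line [$db get -consume]
        if {[llength $line] > 0} {
            # Each record is a {recno command} pair.
            set cmd [lindex [lindex $line 0] 1]
            puts "dequeued: $cmd"
        } else {
            incr waitcnt
            after 1000      ;# queue empty; retry before giving up
        }
    }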
diff --git a/storage/bdb/test/recd001.tcl b/storage/bdb/test/recd001.tcl
deleted file mode 100644
index 67ad8004cc2..00000000000
--- a/storage/bdb/test/recd001.tcl
+++ /dev/null
@@ -1,242 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd001.tcl,v 11.43 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd001
-# TEST Per-operation recovery tests for non-duplicate, non-split
-# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
-# TEST conditions. Any test that appears with the message (change state)
-# TEST indicates that we've already run the particular test, but we are
-# TEST running it again so that we can change the state of the database
-# TEST to prepare for the next test (this applies to all other recovery
-# TEST tests as well).
-# TEST
-# TEST These are the most basic recovery tests. We do individual recovery
-# TEST tests for each operation in the access method interface. First we
-# TEST create a file and capture the state of the database (i.e., we copy
-# TEST it). Then we run a transaction containing a single operation. In
-# TEST one test, we abort the transaction and compare the outcome to the
-# TEST original copy of the file. In the second test, we restore the
-# TEST original copy of the database and then run recovery and compare
-# TEST this against the actual database.
-proc recd001 { method {select 0} args} {
- global fixed_len
- source ./include.tcl
-
- set orig_fixed_len $fixed_len
- set opts [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Recd001: $method operation/transaction tests"
-
- # Create the database and environment.
- env_cleanup $testdir
-
- # The recovery tests were originally written to
- # do a command, abort, do it again, commit, and then
- # repeat the sequence with another command. Each command
- # tends to require that the previous command succeeded and
- # left the database a certain way. To avoid cluttering up the
- # op_recover interface as well as the test code, we create two
- # databases; one does abort and then commit for each op, the
- # other does prepare, prepare-abort, and prepare-commit for each
- # op. If all goes well, this allows each command to depend on
- # exactly one successful iteration of the previous command.
- set testfile recd001.db
- set testfile2 recd001-2.db
-
- set flags "-create -txn -home $testdir"
-
- puts "\tRecd001.a.0: creating environment"
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- #
- # We need to create a database to get the pagesize (either
- # the default or whatever might have been specified).
- # Then remove it so we can compute fixed_len and create the
- # real database.
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set stat [$db stat]
- #
- # Compute the fixed_len based on the pagesize being used.
- # We want the fixed_len to be 1/4 the pagesize.
- #
- set pg [get_pagesize $stat]
- error_check_bad get_pagesize $pg -1
- set fixed_len [expr $pg / 4]
- error_check_good db_close [$db close] 0
- error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
-
- # Convert the args again because fixed_len is now real.
- # Create the databases and close the environment.
- # We cannot specify db truncate in a txn-protected env.
- set opts [convert_args $method ""]
- set omethod [convert_method $method]
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv $opts $testfile2"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- error_check_good env_close [$dbenv close] 0
-
- puts "\tRecd001.a.1: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-
- # List of recovery tests: {CMD MSG} pairs.
- set rlist {
- { {DB put -txn TXNID $key $data} "Recd001.b: put"}
- { {DB del -txn TXNID $key} "Recd001.c: delete"}
- { {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"}
- { {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"}
- { {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"}
- { {DB del -txn TXNID $key} "Recd001.g: big data delete"}
- { {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"}
- { {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"}
- { {DB put -txn TXNID -partial "$off $len" $key $partial_grow}
- "Recd001.j: partial put growing"}
- { {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"}
- { {DB put -txn TXNID -partial "$off $len" $key $partial_shrink}
- "Recd001.l: partial put shrinking"}
- { {DB put -txn TXNID -append $data} "Recd001.m: put -append"}
- { {DB get -txn TXNID -consume} "Recd001.n: db get -consume"}
- }
-
- # These are all the data values that we're going to need to read
- # through the operation table and run the recovery tests.
-
- if { [is_record_based $method] == 1 } {
- set key 1
- } else {
- set key recd001_key
- }
- set data recd001_data
- set newdata NEWrecd001_dataNEW
- set off 3
- set len 12
-
- set partial_grow replacement_record_grow
- set partial_shrink xxx
- if { [is_fixed_length $method] == 1 } {
- set len [string length $partial_grow]
- set partial_shrink $partial_grow
- }
- set bigdata [replicate $key $fixed_len]
- if { [is_record_based $method] == 1 } {
- set bigkey $fixed_len
- } else {
- set bigkey [replicate $key $fixed_len]
- }
-
- foreach pair $rlist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
- }
-
- if { [is_queue $method] != 1 } {
- if { [string first append $cmd] != -1 } {
- continue
- }
- if { [string first consume $cmd] != -1 } {
- continue
- }
- }
-
-# if { [is_fixed_length $method] == 1 } {
-# if { [string first partial $cmd] != -1 } {
-# continue
-# }
-# }
- op_recover abort $testdir $env_cmd $testfile $cmd $msg
- op_recover commit $testdir $env_cmd $testfile $cmd $msg
- #
- # Note that since prepare-discard ultimately aborts
- # the txn, it must come before prepare-commit.
- #
- op_recover prepare-abort $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-discard $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-commit $testdir $env_cmd $testfile2 \
- $cmd $msg
- }
- set fixed_len $orig_fixed_len
-
- if { [is_fixed_length $method] == 1 } {
- puts "Skipping remainder of test for fixed length methods"
- return
- }
-
- #
- # Check partial extensions. If we add a key/data pair to the database
- # and then expand it using -partial, then recover, recovery used to
- # fail (bug #3944). Check that scenario here.
- #
- # !!!
- # We loop here because on each iteration, we need to clean up
- # the old env (i.e. this test does not depend on earlier runs).
- # If we run it without cleaning up the env in between, we do not
- # test the scenario of #3944.
- #
- set len [string length $data]
- set len2 256
- set part_data [replicate "abcdefgh" 32]
- set p [list 0 $len]
- set cmd [subst \
- {DB put -txn TXNID -partial "$len $len2" $key $part_data}]
- set msg "Recd001.o: partial put prepopulated/expanding"
- foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
- env_cleanup $testdir
-
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
- set t [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv -txn $t $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv -txn $t $opts $testfile2"
- set db2 [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db2] TRUE
-
- set ret [$db put -txn $t -partial $p $key $data]
- error_check_good dbput $ret 0
-
- set ret [$db2 put -txn $t -partial $p $key $data]
- error_check_good dbput $ret 0
- error_check_good txncommit [$t commit] 0
- error_check_good dbclose [$db close] 0
- error_check_good dbclose [$db2 close] 0
- error_check_good dbenvclose [$dbenv close] 0
-
- op_recover $op $testdir $env_cmd $testfile $cmd $msg
- }
- return
-}
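The {CMD MSG} table above relies on deferred substitution: commands are stored with $-variables and expanded per iteration via [subst], while the literal words DB and TXNID survive for op_recover to fill in later. The idiom in isolation:

    set key recd001_key
    set data recd001_data
    set rlist {
        { {DB put -txn TXNID $key $data} "example: put"}
    }
    foreach pair $rlist {
        set cmd [subst [lindex $pair 0]]    ;# expands $key/$data only
        set msg [lindex $pair 1]
        puts "$msg -> $cmd"
    }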
diff --git a/storage/bdb/test/recd002.tcl b/storage/bdb/test/recd002.tcl
deleted file mode 100644
index 6189c5e2304..00000000000
--- a/storage/bdb/test/recd002.tcl
+++ /dev/null
@@ -1,103 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd002.tcl,v 11.32 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd002
-# TEST Split recovery tests. For every known split log message, makes sure
-# TEST that we exercise redo, undo, and do-nothing conditions.
-proc recd002 { method {select 0} args} {
- source ./include.tcl
- global rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Recd002: skipping for specific pagesizes"
- return
- }
- berkdb srand $rand_init
-
- # Queues don't do splits, so we don't really need the small page
- # size; moreover, the small page size is smaller than the record,
- # which is a problem.
- if { [string compare $omethod "-queue"] == 0 } {
- set pagesize 4096
- } else {
- set pagesize 512
- }
- puts "Recd002: $method split recovery tests"
-
- env_cleanup $testdir
- set testfile recd002.db
- set testfile2 recd002-2.db
- set eflags \
- "-create -txn -lock_max 2000 -home $testdir"
-
- puts "\tRecd002.a: creating environment"
- set env_cmd "berkdb_env $eflags"
- set dbenv [eval $env_cmd]
- error_check_bad dbenv $dbenv NULL
-
- # Create the databases. We will use a small page size so that splits
- # happen fairly quickly.
- set oflags "-create $args $omethod -mode 0644 -env $dbenv\
- -pagesize $pagesize $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_bad db_open $db NULL
- error_check_good db_open [is_substr $db db] 1
- error_check_good db_close [$db close] 0
- set oflags "-create $args $omethod -mode 0644 -env $dbenv\
- -pagesize $pagesize $testfile2"
- set db [eval {berkdb_open} $oflags]
- error_check_bad db_open $db NULL
- error_check_good db_open [is_substr $db db] 1
- error_check_good db_close [$db close] 0
- reset_env $dbenv
-
- # List of recovery tests: {CMD MSG} pairs
- set slist {
- { {populate DB $omethod TXNID $n 0 0} "Recd002.b: splits"}
- { {unpopulate DB TXNID $r} "Recd002.c: Remove keys"}
- }
-
- # If pages are 512 bytes, then adding 512 key/data pairs
- # should be more than sufficient.
- set n 512
- set r [expr $n / 2 ]
- foreach pair $slist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
- }
- op_recover abort $testdir $env_cmd $testfile $cmd $msg
- op_recover commit $testdir $env_cmd $testfile $cmd $msg
- #
- # Note that since prepare-discard ultimately aborts
- # the txn, it must come before prepare-commit.
- #
- op_recover prepare-abort $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-discard $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-commit $testdir $env_cmd $testfile2 \
- $cmd $msg
- }
-
- puts "\tRecd002.d: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-}
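The sizing comment above is a back-of-envelope bound; the same arithmetic in Tcl, with an illustrative (not measured) per-pair cost:

    set pagesize 512
    set per_pair 24     ;# assumed key + data + page overhead, in bytes
    set pairs 512
    puts "roughly [expr {($pairs * $per_pair) / $pagesize}] leaf pages"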
diff --git a/storage/bdb/test/recd003.tcl b/storage/bdb/test/recd003.tcl
deleted file mode 100644
index b6e799b3c18..00000000000
--- a/storage/bdb/test/recd003.tcl
+++ /dev/null
@@ -1,119 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd003.tcl,v 11.32 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd003
-# TEST Duplicate recovery tests. For every known duplicate log message,
-# TEST makes sure that we exercise redo, undo, and do-nothing conditions.
-# TEST
-# TEST Test all the duplicate log messages and recovery operations. We make
-# TEST sure that we exercise all possible recovery actions: redo, undo,
-# TEST undo but no fix necessary, and redo but no fix necessary.
-proc recd003 { method {select 0} args } {
- source ./include.tcl
- global rand_init
-
- set largs [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Recd003 skipping for method $method"
- return
- }
- puts "Recd003: $method duplicate recovery tests"
-
- berkdb srand $rand_init
-
- env_cleanup $testdir
- # See comment in recd001.tcl for why there are two database files...
- set testfile recd003.db
- set testfile2 recd003-2.db
- set eflags "-create -txn -home $testdir"
-
- puts "\tRecd003.a: creating environment"
- set env_cmd "berkdb_env $eflags"
- set dbenv [eval $env_cmd]
- error_check_bad dbenv $dbenv NULL
-
- # Create the databases.
- set oflags \
- "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_bad db_open $db NULL
- error_check_good db_open [is_substr $db db] 1
- error_check_good db_close [$db close] 0
- set oflags \
- "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile2"
- set db [eval {berkdb_open} $oflags]
- error_check_bad db_open $db NULL
- error_check_good db_open [is_substr $db db] 1
- error_check_good db_close [$db close] 0
- reset_env $dbenv
-
- # These are all the data values that we're going to need to read
- # through the operation table and run the recovery tests.
- set n 10
- set dupn 2000
- set bign 500
-
- # List of recovery tests: {CMD MSG} pairs
- set dlist {
- { {populate DB $omethod TXNID $n 1 0}
- "Recd003.b: add dups"}
- { {DB del -txn TXNID duplicate_key}
- "Recd003.c: remove dups all at once"}
- { {populate DB $omethod TXNID $n 1 0}
- "Recd003.d: add dups (change state)"}
- { {unpopulate DB TXNID 0}
- "Recd003.e: remove dups 1 at a time"}
- { {populate DB $omethod TXNID $dupn 1 0}
- "Recd003.f: dup split"}
- { {DB del -txn TXNID duplicate_key}
- "Recd003.g: remove dups (change state)"}
- { {populate DB $omethod TXNID $n 1 1}
- "Recd003.h: add big dup"}
- { {DB del -txn TXNID duplicate_key}
- "Recd003.i: remove big dup all at once"}
- { {populate DB $omethod TXNID $n 1 1}
- "Recd003.j: add big dup (change state)"}
- { {unpopulate DB TXNID 0}
- "Recd003.k: remove big dup 1 at a time"}
- { {populate DB $omethod TXNID $bign 1 1}
- "Recd003.l: split big dup"}
- }
-
- foreach pair $dlist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
- }
- op_recover abort $testdir $env_cmd $testfile $cmd $msg
- op_recover commit $testdir $env_cmd $testfile $cmd $msg
- #
- # Note that since prepare-discard ultimately aborts
- # the txn, it must come before prepare-commit.
- #
- op_recover prepare-abort $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-discard $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-commit $testdir $env_cmd $testfile2 \
- $cmd $msg
- }
-
- puts "\tRecd003.m: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-}
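Duplicate handling as driven above: a database opened with -dup keeps multiple data items under a single key, and a cursor walks them with -nextdup. A small sketch (berkdb Tcl extension assumed; the path is illustrative):

    set db [berkdb_open -create -btree -dup /tmp/dupdemo.db]
    $db put fruit apple
    $db put fruit pear
    set dbc [$db cursor]
    for {set r [$dbc get -set fruit]} {[llength $r] > 0} \
        {set r [$dbc get -nextdup]} {
        puts [lindex [lindex $r 0] 1]   ;# apple, then pear
    }
    $dbc close
    $db close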
diff --git a/storage/bdb/test/recd004.tcl b/storage/bdb/test/recd004.tcl
deleted file mode 100644
index 72e66b5e674..00000000000
--- a/storage/bdb/test/recd004.tcl
+++ /dev/null
@@ -1,95 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd004.tcl,v 11.31 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd004
-# TEST Big key test where big key gets elevated to internal page.
-proc recd004 { method {select 0} args} {
- source ./include.tcl
- global rand_init
-
- set opts [convert_args $method $args]
- set omethod [convert_method $method]
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Recd004: skipping for specific pagesizes"
- return
- }
- if { [is_record_based $method] == 1 } {
- puts "Recd004 skipping for method $method"
- return
- }
- puts "Recd004: $method big-key on internal page recovery tests"
-
- berkdb srand $rand_init
-
- env_cleanup $testdir
- set testfile recd004.db
- set testfile2 recd004-2.db
- set eflags "-create -txn -home $testdir"
- puts "\tRecd004.a: creating environment"
- set env_cmd "berkdb_env $eflags"
- set dbenv [eval $env_cmd]
- error_check_bad dbenv $dbenv NULL
-
- # Create the databases. We will use a small page size so that we
- # elevate quickly
- set oflags "-create -mode 0644 \
- $omethod -env $dbenv $opts -pagesize 512 $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_bad db_open $db NULL
- error_check_good db_open [is_substr $db db] 1
- error_check_good db_close [$db close] 0
- set oflags "-create -mode 0644 \
- $omethod -env $dbenv $opts -pagesize 512 $testfile2"
- set db [eval {berkdb_open} $oflags]
- error_check_bad db_open $db NULL
- error_check_good db_open [is_substr $db db] 1
- error_check_good db_close [$db close] 0
- reset_env $dbenv
-
- # List of recovery tests: {CMD MSG} pairs
- set slist {
- { {big_populate DB TXNID $n} "Recd004.b: big key elevation"}
- { {unpopulate DB TXNID 0} "Recd004.c: Remove keys"}
- }
-
- # If pages are 512 bytes, then adding 512 key/data pairs
- # should be more than sufficient.
- set n 512
- foreach pair $slist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
- }
- op_recover abort $testdir $env_cmd $testfile $cmd $msg
- op_recover commit $testdir $env_cmd $testfile $cmd $msg
- #
- # Note that since prepare-discard ultimately aborts
- # the txn, it must come before prepare-commit.
- #
- op_recover prepare-abort $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-discard $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-commit $testdir $env_cmd $testfile2 \
- $cmd $msg
- }
-
- puts "\tRecd004.d: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-}
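The oversized keys come from the harness's replicate helper (defined in testutils.tcl); a minimal stand-in with the same repeat-the-string behavior:

    proc replicate { str repeat } {
        set out $str
        for {set i 1} {$i < $repeat} {incr i} {
            append out $str
        }
        return $out
    }
    puts [string length [replicate key 128]]    ;# 384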
diff --git a/storage/bdb/test/recd005.tcl b/storage/bdb/test/recd005.tcl
deleted file mode 100644
index df6a2df3a53..00000000000
--- a/storage/bdb/test/recd005.tcl
+++ /dev/null
@@ -1,230 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd005.tcl,v 11.36 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd005
-# TEST Verify reuse of file ids works on catastrophic recovery.
-# TEST
-# TEST Make sure that we can do catastrophic recovery even if we open
-# TEST files using the same log file id.
-proc recd005 { method args} {
- source ./include.tcl
- global rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Recd005: $method catastrophic recovery"
-
- berkdb srand $rand_init
-
- set testfile1 recd005.1.db
- set testfile2 recd005.2.db
- set eflags \
- "-create -txn -lock_max 2000 -lock_max_objects 2000 -home $testdir"
-
- set tnum 0
- foreach sizes "{1000 10} {10 1000}" {
- foreach ops "{abort abort} {abort commit} {commit abort} \
- {commit commit}" {
- env_cleanup $testdir
- incr tnum
-
- set s1 [lindex $sizes 0]
- set s2 [lindex $sizes 1]
- set op1 [lindex $ops 0]
- set op2 [lindex $ops 1]
- puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
-
- puts "\tRecd005.$tnum.a: creating environment"
- set env_cmd "berkdb_env $eflags"
- set dbenv [eval $env_cmd]
- error_check_bad dbenv $dbenv NULL
-
- # Create the two databases.
- set oflags \
- "-create -mode 0644 -env $dbenv $args $omethod"
- set db1 [eval {berkdb_open} $oflags $testfile1]
- error_check_bad db_open $db1 NULL
- error_check_good db_open [is_substr $db1 db] 1
- error_check_good db_close [$db1 close] 0
-
- set db2 [eval {berkdb_open} $oflags $testfile2]
- error_check_bad db_open $db2 NULL
- error_check_good db_open [is_substr $db2 db] 1
- error_check_good db_close [$db2 close] 0
- $dbenv close
-
- set dbenv [eval $env_cmd]
- puts "\tRecd005.$tnum.b: Populating databases"
- do_one_file \
- $testdir $method $dbenv $env_cmd $testfile1 $s1 $op1
- do_one_file \
- $testdir $method $dbenv $env_cmd $testfile2 $s2 $op2
-
- puts "\tRecd005.$tnum.c: Verifying initial population"
- check_file $testdir $env_cmd $testfile1 $op1
- check_file $testdir $env_cmd $testfile2 $op2
-
- # Now, close the environment (so that recovery will work
- # on NT, which won't allow deleting an open file).
- reset_env $dbenv
-
- berkdb debug_check
- puts -nonewline \
- "\tRecd005.$tnum.d: About to run recovery ... "
- flush stdout
-
- set stat [catch \
- {exec $util_path/db_recover -h $testdir -c} \
- result]
- if { $stat == 1 } {
- error "Recovery error: $result."
- }
- puts "complete"
-
- # Substitute a file that will need recovery and try
- # running recovery again.
- if { $op1 == "abort" } {
- file copy -force $testdir/$testfile1.afterop \
- $testdir/$testfile1
- move_file_extent $testdir $testfile1 \
- afterop copy
- } else {
- file copy -force $testdir/$testfile1.init \
- $testdir/$testfile1
- move_file_extent $testdir $testfile1 init copy
- }
- if { $op2 == "abort" } {
- file copy -force $testdir/$testfile2.afterop \
- $testdir/$testfile2
- move_file_extent $testdir $testfile2 \
- afterop copy
- } else {
- file copy -force $testdir/$testfile2.init \
- $testdir/$testfile2
- move_file_extent $testdir $testfile2 init copy
- }
-
- berkdb debug_check
- puts -nonewline "\tRecd005.$tnum.e:\
- About to run recovery on pre-op database ... "
- flush stdout
-
- set stat \
- [catch {exec $util_path/db_recover \
- -h $testdir -c} result]
- if { $stat == 1 } {
- error "Recovery error: $result."
- }
- puts "complete"
-
- set dbenv [eval $env_cmd]
- check_file $testdir $env_cmd $testfile1 $op1
- check_file $testdir $env_cmd $testfile2 $op2
- reset_env $dbenv
-
- puts "\tRecd005.$tnum.f:\
- Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch \
- {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
- }
- }
-}
-
-proc do_one_file { dir method env env_cmd filename num op } {
- source ./include.tcl
-
- set init_file $dir/$filename.t1
- set afterop_file $dir/$filename.t2
- set final_file $dir/$filename.t3
-
- # Save the initial file and open the environment and the first file
- file copy -force $dir/$filename $dir/$filename.init
- copy_extent_file $dir $filename init
- set oflags "-auto_commit -unknown -env $env"
- set db [eval {berkdb_open} $oflags $filename]
-
- # Dump out file contents for initial case
- open_and_dump_file $filename $env $init_file nop \
- dump_file_direction "-first" "-next"
-
- set txn [$env txn]
- error_check_bad txn_begin $txn NULL
- error_check_good txn_begin [is_substr $txn $env] 1
-
- # Now fill in the db and the txnid in the command
- populate $db $method $txn $num 0 0
-
- # Sync the file so that we can capture a snapshot to test
- # recovery.
- error_check_good sync:$db [$db sync] 0
- file copy -force $dir/$filename $dir/$filename.afterop
- copy_extent_file $dir $filename afterop
- open_and_dump_file $testdir/$filename.afterop NULL \
- $afterop_file nop dump_file_direction "-first" "-next"
- error_check_good txn_$op:$txn [$txn $op] 0
-
- if { $op == "commit" } {
- puts "\t\tFile $filename executed and committed."
- } else {
- puts "\t\tFile $filename executed and aborted."
- }
-
- # Dump out file and save a copy.
- error_check_good sync:$db [$db sync] 0
- open_and_dump_file $testdir/$filename NULL $final_file nop \
- dump_file_direction "-first" "-next"
- file copy -force $dir/$filename $dir/$filename.final
- copy_extent_file $dir $filename final
-
- # If this is an abort, it should match the original file.
- # If this was a commit, then this file should match the
- # afterop file.
- if { $op == "abort" } {
- filesort $init_file $init_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(initial,post-$op):diff($init_file,$final_file) \
- [filecmp $init_file.sort $final_file.sort] 0
- } else {
- filesort $afterop_file $afterop_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
- [filecmp $afterop_file.sort $final_file.sort] 0
- }
-
- error_check_good close:$db [$db close] 0
-}
-
-proc check_file { dir env_cmd filename op } {
- source ./include.tcl
-
- set init_file $dir/$filename.t1
- set afterop_file $dir/$filename.t2
- set final_file $dir/$filename.t3
-
- open_and_dump_file $testdir/$filename NULL $final_file nop \
- dump_file_direction "-first" "-next"
- if { $op == "abort" } {
- filesort $init_file $init_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(initial,post-$op):diff($init_file,$final_file) \
- [filecmp $init_file.sort $final_file.sort] 0
- } else {
- filesort $afterop_file $afterop_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
- [filecmp $afterop_file.sort $final_file.sort] 0
- }
-}
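The catastrophic step in isolation: db_recover -c replays the complete log set against whatever database files are present ($util_path and $testdir come from the harness's include.tcl):

    set stat [catch {exec $util_path/db_recover -h $testdir -c} result]
    error_check_good db_recover $stat 0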
diff --git a/storage/bdb/test/recd006.tcl b/storage/bdb/test/recd006.tcl
deleted file mode 100644
index bca968e5bb3..00000000000
--- a/storage/bdb/test/recd006.tcl
+++ /dev/null
@@ -1,262 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd006.tcl,v 11.28 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd006
-# TEST Nested transactions.
-proc recd006 { method {select 0} args} {
- global kvals
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Recd006 skipping for method $method"
- return
- }
- puts "Recd006: $method nested transactions"
-
- # Create the database and environment.
- env_cleanup $testdir
-
- set dbfile recd006.db
- set testfile $testdir/$dbfile
-
- puts "\tRecd006.a: create database"
- set oflags "-create $args $omethod $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Make sure that we have enough entries to span a couple of
- # different pages.
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < 1000 } {
- if { [string compare $omethod "-recno"] == 0 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
-
- set ret [$db put -nooverwrite $key $str]
- error_check_good put $ret 0
-
- incr count
- }
- close $did
-
- # Variables used below:
- # p1: a pair of keys that are likely to be on the same page.
- # p2: a pair of keys that are likely to be on the same page,
- # but on a page different than those in p1.
- set dbc [$db cursor]
- error_check_good dbc [is_substr $dbc $db] 1
-
- set ret [$dbc get -first]
- error_check_bad dbc_get:DB_FIRST [llength $ret] 0
- set p1 [lindex [lindex $ret 0] 0]
- set kvals($p1) [lindex [lindex $ret 0] 1]
-
- set ret [$dbc get -next]
- error_check_bad dbc_get:DB_NEXT [llength $ret] 0
- lappend p1 [lindex [lindex $ret 0] 0]
- set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
-
- set ret [$dbc get -last]
- error_check_bad dbc_get:DB_LAST [llength $ret] 0
- set p2 [lindex [lindex $ret 0] 0]
- set kvals($p2) [lindex [lindex $ret 0] 1]
-
- set ret [$dbc get -prev]
- error_check_bad dbc_get:DB_PREV [llength $ret] 0
- lappend p2 [lindex [lindex $ret 0] 0]
- set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
-
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
-
- # Now create the full transaction environment.
- set eflags "-create -txn -home $testdir"
-
- puts "\tRecd006.b: creating environment"
- set env_cmd "berkdb_env $eflags"
- set dbenv [eval $env_cmd]
- error_check_bad dbenv $dbenv NULL
-
- # Reset the environment.
- reset_env $dbenv
-
- set p1 [list $p1]
- set p2 [list $p2]
-
- # List of recovery tests: {CMD MSG} pairs
- set rlist {
- { {nesttest DB TXNID ENV 1 $p1 $p2 commit commit}
- "Recd006.c: children (commit commit)"}
- { {nesttest DB TXNID ENV 0 $p1 $p2 commit commit}
- "Recd006.d: children (commit commit)"}
- { {nesttest DB TXNID ENV 1 $p1 $p2 commit abort}
- "Recd006.e: children (commit abort)"}
- { {nesttest DB TXNID ENV 0 $p1 $p2 commit abort}
- "Recd006.f: children (commit abort)"}
- { {nesttest DB TXNID ENV 1 $p1 $p2 abort abort}
- "Recd006.g: children (abort abort)"}
- { {nesttest DB TXNID ENV 0 $p1 $p2 abort abort}
- "Recd006.h: children (abort abort)"}
- { {nesttest DB TXNID ENV 1 $p1 $p2 abort commit}
- "Recd006.i: children (abort commit)"}
- { {nesttest DB TXNID ENV 0 $p1 $p2 abort commit}
- "Recd006.j: children (abort commit)"}
- }
-
- foreach pair $rlist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
- }
- op_recover abort $testdir $env_cmd $dbfile $cmd $msg
- op_recover commit $testdir $env_cmd $dbfile $cmd $msg
- }
-
- puts "\tRecd006.k: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-}
-
-# Do the nested transaction test.
-# We want to make sure that children inherit properly from their
-# parents, that locks are properly handed back to parents, and
-# that the right thing happens on commit/abort.
-# In particular:
-# Write lock on parent, properly acquired by child.
-# Committed operation on child gives lock to parent so that
-# other child can also get the lock.
-# Aborted op by child releases lock so other child can get it.
-# Correct database state if child commits
-# Correct database state if child aborts
-proc nesttest { db parent env do p1 p2 child1 child2} {
- global kvals
- source ./include.tcl
-
- if { $do == 1 } {
- set func toupper
- } else {
- set func tolower
- }
-
- # Do an RMW on the parent to get a write lock.
- set p10 [lindex $p1 0]
- set p11 [lindex $p1 1]
- set p20 [lindex $p2 0]
- set p21 [lindex $p2 1]
-
- set ret [$db get -rmw -txn $parent $p10]
- set res $ret
- set Dret [lindex [lindex $ret 0] 1]
- if { [string compare $Dret $kvals($p10)] == 0 ||
- [string compare $Dret [string toupper $kvals($p10)]] == 0 } {
- set val 0
- } else {
- set val $Dret
- }
- error_check_good get_parent_RMW $val 0
-
- # OK, do child 1
- set kid1 [$env txn -parent $parent]
- error_check_good kid1 [is_valid_txn $kid1 $env] TRUE
-
- # Reading write-locked parent object should be OK
- #puts "\tRead write-locked parent object for kid1."
- set ret [$db get -txn $kid1 $p10]
- error_check_good kid1_get10 $ret $res
-
- # Now update this child
- set data [lindex [lindex [string $func $ret] 0] 1]
- set ret [$db put -txn $kid1 $p10 $data]
- error_check_good kid1_put10 $ret 0
-
- #puts "\tKid1 successful put."
-
- # Now start child2
- #puts "\tBegin txn for kid2."
- set kid2 [$env txn -parent $parent]
- error_check_good kid2 [is_valid_txn $kid2 $env] TRUE
-
- # Getting anything in the p1 set should deadlock, so let's
- # work on the p2 set.
- set data [string $func $kvals($p20)]
- #puts "\tPut data for kid2."
- set ret [$db put -txn $kid2 $p20 $data]
- error_check_good kid2_put20 $ret 0
-
- #puts "\tKid2 data put successful."
-
- # Now let's do the right thing to kid1
- puts -nonewline "\tKid1 $child1..."
- if { [string compare $child1 "commit"] == 0 } {
- error_check_good kid1_commit [$kid1 commit] 0
- } else {
- error_check_good kid1_abort [$kid1 abort] 0
- }
- puts "complete"
-
- # In either case, child2 should now be able to get the
- # lock, either because it is inherited by the parent
- # (commit) or because it was released (abort).
- set data [string $func $kvals($p11)]
- set ret [$db put -txn $kid2 $p11 $data]
- error_check_good kid2_put11 $ret 0
-
- # Now let's do the right thing to kid2
- puts -nonewline "\tKid2 $child2..."
- if { [string compare $child2 "commit"] == 0 } {
- error_check_good kid2_commit [$kid2 commit] 0
- } else {
- error_check_good kid2_abort [$kid2 abort] 0
- }
- puts "complete"
-
- # Now, let parent check that the right things happened.
- # First get all four values
- set p10_check [lindex [lindex [$db get -txn $parent $p10] 0] 0]
- set p11_check [lindex [lindex [$db get -txn $parent $p11] 0] 0]
- set p20_check [lindex [lindex [$db get -txn $parent $p20] 0] 0]
- set p21_check [lindex [lindex [$db get -txn $parent $p21] 0] 0]
-
- if { [string compare $child1 "commit"] == 0 } {
- error_check_good parent_kid1 $p10_check \
- [string tolower [string $func $kvals($p10)]]
- } else {
- error_check_good \
- parent_kid1 $p10_check [string tolower $kvals($p10)]
- }
- if { [string compare $child2 "commit"] == 0 } {
- error_check_good parent_kid2 $p11_check \
- [string tolower [string $func $kvals($p11)]]
- error_check_good parent_kid2 $p20_check \
- [string tolower [string $func $kvals($p20)]]
- } else {
- error_check_good parent_kid2 $p11_check $kvals($p11)
- error_check_good parent_kid2 $p20_check $kvals($p20)
- }
-
- # Now do a write on the parent for p21, whose lock the parent
- # should either already hold or be able to acquire.
- set ret [$db put -txn $parent $p21 [string $func $kvals($p21)]]
- error_check_good parent_put21 $ret 0
-
- return 0
-}
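The nested-transaction shape nesttest exercises, reduced to its skeleton (assumes $env and $db handles as in the test; the key and value are placeholders):

    set parent [$env txn]
    set child [$env txn -parent $parent]
    error_check_good child_put [$db put -txn $child somekey someval] 0
    # On commit the child's locks pass to the parent ...
    error_check_good child_commit [$child commit] 0
    # ... so the parent (or a sibling child) may now touch somekey.
    error_check_good parent_commit [$parent commit] 0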
diff --git a/storage/bdb/test/recd007.tcl b/storage/bdb/test/recd007.tcl
deleted file mode 100644
index 9764d840f62..00000000000
--- a/storage/bdb/test/recd007.tcl
+++ /dev/null
@@ -1,906 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd007.tcl,v 11.64 2004/07/07 19:08:21 carol Exp $
-#
-# TEST recd007
-# TEST File create/delete tests.
-# TEST
-# TEST This is a recovery test for create/delete of databases. We have
-# TEST hooks in the database so that we can abort the process at various
-# TEST points and make sure that the transaction doesn't commit. We
-# TEST then need to recover and make sure the file correctly exists
-# TEST or not, as the case may be.
-proc recd007 { method args} {
- global fixed_len
- source ./include.tcl
-
- set orig_fixed_len $fixed_len
- set opts [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Recd007: $method operation/transaction tests"
-
- # Create the database and environment.
- env_cleanup $testdir
-
- set testfile recd007.db
- set flags "-create -txn -home $testdir"
-
- puts "\tRecd007.a: creating environment"
- set env_cmd "berkdb_env $flags"
-
- set env [eval $env_cmd]
-
- # We need to create a database to get the pagesize (either
- # the default or whatever might have been specified).
- # Then remove it so we can compute fixed_len and create the
- # real database.
- set oflags "-create $omethod -mode 0644 -env $env $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set stat [$db stat]
- #
- # Compute the fixed_len based on the pagesize being used.
- # We want the fixed_len to be 1/4 the pagesize.
- #
- set pg [get_pagesize $stat]
- error_check_bad get_pagesize $pg -1
- set fixed_len [expr $pg / 4]
- error_check_good db_close [$db close] 0
- error_check_good dbremove [berkdb dbremove -env $env $testfile] 0
- error_check_good envclose [$env close] 0
-
- # Convert the args again because fixed_len is now real.
- set opts [convert_args $method ""]
- set save_opts $opts
- set moreopts {" -lorder 1234 " " -lorder 1234 -chksum " \
- " -lorder 4321 " " -lorder 4321 -chksum "}
-
- # List of recovery tests: {HOOKS MSG} pairs
- # Where each HOOK is a list of {COPY ABORT}
- #
- set rlist {
- { {"none" "preopen"} "Recd007.b0: none/preopen"}
- { {"none" "postopen"} "Recd007.b1: none/postopen"}
- { {"none" "postlogmeta"} "Recd007.b2: none/postlogmeta"}
- { {"none" "postlog"} "Recd007.b3: none/postlog"}
- { {"none" "postsync"} "Recd007.b4: none/postsync"}
- { {"postopen" "none"} "Recd007.c0: postopen/none"}
- { {"postlogmeta" "none"} "Recd007.c1: postlogmeta/none"}
- { {"postlog" "none"} "Recd007.c2: postlog/none"}
- { {"postsync" "none"} "Recd007.c3: postsync/none"}
- { {"postopen" "postopen"} "Recd007.d: postopen/postopen"}
- { {"postopen" "postlogmeta"} "Recd007.e: postopen/postlogmeta"}
- { {"postopen" "postlog"} "Recd007.f: postopen/postlog"}
- { {"postlog" "postlog"} "Recd007.g: postlog/postlog"}
- { {"postlogmeta" "postlogmeta"} "Recd007.h: postlogmeta/postlogmeta"}
- { {"postlogmeta" "postlog"} "Recd007.i: postlogmeta/postlog"}
- { {"postlog" "postsync"} "Recd007.j: postlog/postsync"}
- { {"postsync" "postsync"} "Recd007.k: postsync/postsync"}
- }
-
- # These are all the data values that we're going to need to read
- # through the operation table and run the recovery tests.
-
- foreach pair $rlist {
- set cmd [lindex $pair 0]
- set msg [lindex $pair 1]
- #
- # Run natively
- #
- file_recover_create $testdir $env_cmd $omethod \
- $save_opts $testfile $cmd $msg
- foreach o $moreopts {
- set opts $save_opts
- append opts $o
- file_recover_create $testdir $env_cmd $omethod \
- $opts $testfile $cmd $msg
- }
- }
-
- set rlist {
- { {"none" "predestroy"} "Recd007.l0: none/predestroy"}
- { {"none" "postdestroy"} "Recd007.l1: none/postdestroy"}
- { {"predestroy" "none"} "Recd007.m0: predestroy/none"}
- { {"postdestroy" "none"} "Recd007.m1: postdestroy/none"}
- { {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"}
- { {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"}
- { {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"}
- }
- foreach op { dbremove dbrename dbtruncate } {
- foreach pair $rlist {
- set cmd [lindex $pair 0]
- set msg [lindex $pair 1]
- file_recover_delete $testdir $env_cmd $omethod \
- $save_opts $testfile $cmd $msg $op
- foreach o $moreopts {
- set opts $save_opts
- append opts $o
- file_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile $cmd $msg $op
- }
- }
- }
-
- if { $is_windows_test != 1 } {
- set env_cmd "berkdb_env_noerr $flags"
- do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
- }
-
- puts "\tRecd007.r: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
- set fixed_len $orig_fixed_len
- return
-}
-
-proc file_recover_create { dir env_cmd method opts dbfile cmd msg } {
- #
- # We run this test on each of these scenarios:
- # 1. Creating just a database
- # 2. Creating a database with a subdb
- # 3. Creating a 2nd subdb in a database
- puts "\t$msg ($opts) create with a database"
- do_file_recover_create $dir $env_cmd $method $opts $dbfile \
- 0 $cmd $msg
- if { [is_queue $method] == 1 } {
- puts "\tSkipping subdatabase tests for method $method"
- return
- }
- puts "\t$msg ($opts) create with a database and subdb"
- do_file_recover_create $dir $env_cmd $method $opts $dbfile \
- 1 $cmd $msg
- puts "\t$msg ($opts) create with a database and 2nd subdb"
- do_file_recover_create $dir $env_cmd $method $opts $dbfile \
- 2 $cmd $msg
-
-}
-
-proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
- global log_log_record_types
- source ./include.tcl
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $dir
- }
-
- env_cleanup $dir
- set dflags "-dar"
- # Open the environment and set the copy/abort locations
- set env [eval $env_cmd]
- set copy [lindex $cmd 0]
- set abort [lindex $cmd 1]
- error_check_good copy_location [is_valid_create_loc $copy] 1
- error_check_good abort_location [is_valid_create_loc $abort] 1
-
- if {([string first "logmeta" $copy] != -1 || \
- [string first "logmeta" $abort] != -1) && \
- [is_btree $method] == 0 } {
- puts "\tSkipping for method $method"
- $env test copy none
- $env test abort none
- error_check_good env_close [$env close] 0
- return
- }
-
- # Basically non-existence is our initial state. When we
- # abort, it is also our final state.
- #
- switch $sub {
- 0 {
- set oflags "-create $method -auto_commit -mode 0644 \
- -env $env $opts $dbfile"
- }
- 1 {
- set oflags "-create $method -auto_commit -mode 0644 \
- -env $env $opts $dbfile sub0"
- }
- 2 {
- #
- # If we are aborting here, then we need to
- # create a first subdb, then create a second
- #
- set oflags "-create $method -auto_commit -mode 0644 \
- -env $env $opts $dbfile sub0"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
- set init_file $dir/$dbfile.init
- catch { file copy -force $dir/$dbfile $init_file } res
- set oflags "-create $method -auto_commit -mode 0644 \
- -env $env $opts $dbfile sub1"
- }
- default {
- puts "\tBad value $sub for sub"
- return
- }
- }
- #
- # Set our locations to copy and abort
- #
- set ret [eval $env test copy $copy]
- error_check_good test_copy $ret 0
- set ret [eval $env test abort $abort]
- error_check_good test_abort $ret 0
-
- puts "\t\tExecuting command"
- set ret [catch {eval {berkdb_open} $oflags} db]
-
- # Sync the mpool so any changes to the file that are
- # in mpool get written to the disk file before the
- # diff.
- $env mpool_sync
-
- #
- # If we don't abort, then we expect success.
- # If we abort, we expect no file created.
- #
- if {[string first "none" $abort] == -1} {
- #
- # Operation was aborted, verify it does
- # not exist.
- #
- puts "\t\tCommand executed and aborted."
-		error_check_bad db_open $ret 0
-
- #
- # Check that the file does not exist. Final state.
- #
- if { $sub != 2 } {
- error_check_good db_open:exists \
- [file exists $dir/$dbfile] 0
- } else {
- error_check_good \
- diff(init,postcreate):diff($init_file,$dir/$dbfile)\
- [dbdump_diff $dflags $init_file $dir $dbfile] 0
- }
- } else {
- #
- # Operation was committed, verify it exists.
- #
- puts "\t\tCommand executed and committed."
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- #
- # Check that the file exists.
- #
- error_check_good db_open [file exists $dir/$dbfile] 1
- set init_file $dir/$dbfile.init
- catch { file copy -force $dir/$dbfile $init_file } res
-
- if { [is_queue $method] == 1 } {
- copy_extent_file $dir $dbfile init
- }
- }
- error_check_good env_close [$env close] 0
-
- #
- # Run recovery here. Should be a no-op. Verify that
- # the file still doesn't exist or change (depending on sub)
- # when we are done.
- #
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
- if { $sub != 2 && [string first "none" $abort] == -1} {
- #
- # Operation was aborted, verify it still does
- # not exist. Only done with file creations.
- #
- error_check_good after_recover1 [file exists $dir/$dbfile] 0
- } else {
- #
- # Operation was committed or just a subdb was aborted.
- # Verify it did not change.
- #
- error_check_good \
- diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $dflags $init_file $dir $dbfile] 0
- #
- # Need a new copy to get the right LSN into the file.
- #
- catch { file copy -force $dir/$dbfile $init_file } res
-
- if { [is_queue $method] == 1 } {
- copy_extent_file $dir $dbfile init
- }
- }
-
- # If we didn't make a copy, then we are done.
- #
- if {[string first "none" $copy] != -1} {
- return
- }
-
- #
- # Now move the .afterop file to $dbfile. Run recovery again.
- #
- copy_afterop $dir
-
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
- if { $sub != 2 && [string first "none" $abort] == -1} {
- #
- # Operation was aborted, verify it still does
- # not exist. Only done with file creations.
- #
- error_check_good after_recover2 [file exists $dir/$dbfile] 0
- } else {
- #
- # Operation was committed or just a subdb was aborted.
- # Verify it did not change.
- #
- error_check_good \
- diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $dflags $init_file $dir $dbfile] 0
- }
-
-}
-
-proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op } {
- #
- # We run this test on each of these scenarios:
- # 1. Deleting/Renaming just a database
- # 2. Deleting/Renaming a database with a subdb
- # 3. Deleting/Renaming a 2nd subdb in a database
- puts "\t$msg $op ($opts) with a database"
- do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
- 0 $cmd $msg $op
- if { [is_queue $method] == 1 } {
- puts "\tSkipping subdatabase tests for method $method"
- return
- }
- puts "\t$msg $op ($opts) with a database and subdb"
- do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
- 1 $cmd $msg $op
- puts "\t$msg $op ($opts) with a database and 2nd subdb"
- do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
- 2 $cmd $msg $op
-
-}
-
-proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
- global log_log_record_types
- source ./include.tcl
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $dir
- }
-
- env_cleanup $dir
- # Open the environment and set the copy/abort locations
- set env [eval $env_cmd]
- set copy [lindex $cmd 0]
- set abort [lindex $cmd 1]
- error_check_good copy_location [is_valid_delete_loc $copy] 1
- error_check_good abort_location [is_valid_delete_loc $abort] 1
-
- if { [is_record_based $method] == 1 } {
- set key1 1
- set key2 2
- } else {
- set key1 recd007_key1
- set key2 recd007_key2
- }
- set data1 recd007_data0
- set data2 recd007_data1
- set data3 NEWrecd007_data2
-
- #
- # Depending on what sort of subdb we want, if any, our
- # args to the open call will be different (and if we
-	# want a 2nd subdb, we create the first here).
- #
- # XXX
- # For dbtruncate, we want oflags to have "$env" in it,
- # not have the value currently in 'env'. That is why
- # the '$' is protected below. Later on we use oflags
- # but with a new $env we just opened.
- #
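-	# A sketch of the mechanism: "-env \$env" stores the literal
-	# string "$env" in oflags, so a later
-	#	set db [eval {berkdb_open} $oflags]
-	# substitutes whatever handle $env holds at eval time rather
-	# than the handle we hold now.
-	#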
- switch $sub {
- 0 {
- set subdb ""
- set new $dbfile.new
- set dflags "-dar"
- set oflags "-create $method -auto_commit -mode 0644 \
- -env \$env $opts $dbfile"
- }
- 1 {
- set subdb sub0
- set new $subdb.new
- set dflags ""
- set oflags "-create $method -auto_commit -mode 0644 \
- -env \$env $opts $dbfile $subdb"
- }
- 2 {
- #
- # If we are aborting here, then we need to
- # create a first subdb, then create a second
- #
- set subdb sub1
- set new $subdb.new
- set dflags ""
- set oflags "-create $method -auto_commit -mode 0644 \
- -env \$env $opts $dbfile sub0"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set txn [$env txn]
- set ret [$db put -txn $txn $key1 $data1]
- error_check_good db_put $ret 0
- error_check_good commit [$txn commit] 0
- error_check_good db_close [$db close] 0
- set oflags "-create $method -auto_commit -mode 0644 \
- -env \$env $opts $dbfile $subdb"
- }
- default {
- puts "\tBad value $sub for sub"
- return
- }
- }
-
- #
- # Set our locations to copy and abort
- #
- set ret [eval $env test copy $copy]
- error_check_good test_copy $ret 0
- set ret [eval $env test abort $abort]
- error_check_good test_abort $ret 0
-
- #
- # Open our db, add some data, close and copy as our
- # init file.
- #
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set txn [$env txn]
- set ret [$db put -txn $txn $key1 $data1]
- error_check_good db_put $ret 0
- set ret [$db put -txn $txn $key2 $data2]
- error_check_good db_put $ret 0
- error_check_good commit [$txn commit] 0
- error_check_good db_close [$db close] 0
-
- $env mpool_sync
-
- set init_file $dir/$dbfile.init
- catch { file copy -force $dir/$dbfile $init_file } res
-
- if { [is_queue $method] == 1} {
- copy_extent_file $dir $dbfile init
- }
-
- #
- # If we don't abort, then we expect success.
- # If we abort, we expect no file removed.
- #
- switch $op {
- "dbrename" {
- set ret [catch { eval {berkdb} $op -env $env -auto_commit \
- $dbfile $subdb $new } remret]
- }
- "dbremove" {
- set ret [catch { eval {berkdb} $op -env $env -auto_commit \
- $dbfile $subdb } remret]
- }
- "dbtruncate" {
- set txn [$env txn]
- set db [eval {berkdb_open_noerr -env} \
- $env -auto_commit $dbfile $subdb]
- error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good txnbegin [is_valid_txn $txn $env] TRUE
- set ret [catch {$db truncate -txn $txn} remret]
- }
- }
- $env mpool_sync
- if { $abort == "none" } {
- if { $op == "dbtruncate" } {
- error_check_good txncommit [$txn commit] 0
- error_check_good dbclose [$db close] 0
- }
- #
- # Operation was committed, verify it.
- #
- puts "\t\tCommand executed and committed."
- error_check_good $op $ret 0
- #
- # If a dbtruncate, check that truncate returned the number
- # of items previously in the database.
- #
- if { [string compare $op "dbtruncate"] == 0 } {
- error_check_good remret $remret 2
- }
- recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
- } else {
- #
- # Operation was aborted, verify it did not change.
- #
- if { $op == "dbtruncate" } {
- error_check_good txnabort [$txn abort] 0
- error_check_good dbclose [$db close] 0
- }
- puts "\t\tCommand executed and aborted."
- error_check_good $op $ret 1
-
- #
- # Check that the file exists. Final state.
- # Compare against initial file.
- #
- error_check_good post$op.1 [file exists $dir/$dbfile] 1
- error_check_good \
- diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
- [dbdump_diff $dflags $init_file $dir $dbfile] 0
- }
- $env mpool_sync
- error_check_good env_close [$env close] 0
- catch { file copy -force $dir/$dbfile $init_file } res
- if { [is_queue $method] == 1} {
- copy_extent_file $dir $dbfile init
- }
-
- #
- # Run recovery here. Should be a no-op. Verify that
- # the file still doesn't exist or change (depending on abort)
- # when we are done.
- #
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
-
- puts "complete"
-
- if { $abort == "none" } {
- #
-		# Operation was committed.
- #
- set env [eval $env_cmd]
- recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
- error_check_good env_close [$env close] 0
- } else {
- #
- # Operation was aborted, verify it did not change.
- #
- berkdb debug_check
- error_check_good \
- diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $dflags $init_file $dir $dbfile] 0
- }
-
- #
- # If we didn't make a copy, then we are done.
- #
- if {[string first "none" $copy] != -1} {
- return
- }
-
- #
- # Now restore the .afterop file(s) to their original name.
- # Run recovery again.
- #
- copy_afterop $dir
-
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
-
- if { [string first "none" $abort] != -1} {
- set env [eval $env_cmd]
- recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
- error_check_good env_close [$env close] 0
- } else {
- #
- # Operation was aborted, verify it did not change.
- #
- error_check_good \
- diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $dflags $init_file $dir $dbfile] 0
- }
-
-}
-
-#
-# This function tests a specific case of recovering after a db removal.
-# This is for SR #2538. The sequence we want to test is:
-# - Make an env.
-# - Make/close a db.
-# - Remove the db.
-# - Create another db of same name.
-# - Sync db but leave open.
-# - Run recovery.
-# - Verify no recovery errors and that new db is there.
-proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
- global log_log_record_types
- source ./include.tcl
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $dir
- }
- set omethod [convert_method $method]
-
- puts "\tRecd007.q: Delete and recreate a database"
- env_cleanup $dir
- # Open the environment and set the copy/abort locations
- set env [eval $env_cmd]
- error_check_good env_open [is_valid_env $env] TRUE
-
- if { [is_record_based $method] == 1 } {
- set key 1
- } else {
- set key recd007_key
- }
- set data1 recd007_data
- set data2 NEWrecd007_data2
-
- set oflags \
- "-create $omethod -auto_commit -mode 0644 $opts $dbfile"
-
- #
- # Open our db, add some data, close and copy as our
- # init file.
- #
- set db [eval {berkdb_open_noerr} -env $env $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set txn [$env txn]
- set ret [$db put -txn $txn $key $data1]
- error_check_good db_put $ret 0
- error_check_good commit [$txn commit] 0
- error_check_good db_close [$db close] 0
-
- set ret \
- [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
-
- #
- # Operation was committed, verify it does
- # not exist.
- #
- puts "\t\tCommand executed and committed."
- error_check_good dbremove $ret 0
- error_check_good dbremove.1 [file exists $dir/$dbfile] 0
-
- #
- # Now create a new db with the same name.
- #
- set db [eval {berkdb_open_noerr} -env $env $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set txn [$env txn]
- set ret [$db put -txn $txn $key [chop_data $method $data2]]
- error_check_good db_put $ret 0
- error_check_good commit [$txn commit] 0
- error_check_good db_sync [$db sync] 0
-
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
- error_check_good db_recover $stat 0
- error_check_good db_recover.1 [file exists $dir/$dbfile] 1
- #
- # Since we ran recovery on the open db/env, we need to
- # catch these calls. Basically they are there to clean
- # up the Tcl widgets.
- #
- set stat [catch {$db close} ret]
- error_check_bad dbclose_after_remove $stat 0
- error_check_good dbclose_after_remove [is_substr $ret recovery] 1
- set stat [catch {$env close} ret]
- error_check_bad envclose_after_remove $stat 0
- error_check_good envclose_after_remove [is_substr $ret recovery] 1
-
- #
- # Reopen env and db and verify 2nd database is there.
- #
- set env [eval $env_cmd]
- error_check_good env_open [is_valid_env $env] TRUE
- set db [eval {berkdb_open} -env $env $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set ret [$db get $key]
- error_check_good dbget [llength $ret] 1
- set kd [lindex $ret 0]
- error_check_good key [lindex $kd 0] $key
- error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
-
- error_check_good dbclose [$db close] 0
- error_check_good envclose [$env close] 0
-}
-
-proc is_valid_create_loc { loc } {
- switch $loc {
- none -
- preopen -
- postopen -
- postlogmeta -
- postlog -
- postsync
- { return 1 }
- default
- { return 0 }
- }
-}
-
-proc is_valid_delete_loc { loc } {
- switch $loc {
- none -
- predestroy -
- postdestroy -
- postremcall
- { return 1 }
- default
- { return 0 }
- }
-}
-
-# Do a logical diff on the db dump files. We expect either that the
-# files are identical or, if they differ, that the difference is
-# exactly a free/invalid page.
-# Return 1 if they differ, 0 if they are logically the same (or identical).
-#
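-# For example, the recovery checks above call
-#	[dbdump_diff $dflags $init_file $dir $dbfile]
-# and expect a result of 0.
-#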
-proc dbdump_diff { flags initfile dir dbfile } {
- source ./include.tcl
-
- set initdump $initfile.dump
- set dbdump $dbfile.dump
-
- set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
- $initfile} ret]
- error_check_good dbdump.init $stat 0
-
- # Do a dump without the freelist which should eliminate any
- # recovery differences.
- set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
- $dir/$dbfile} ret]
- error_check_good dbdump.db $stat 0
-
- set stat [filecmp $dir/$dbdump $initdump]
-
- if {$stat == 0} {
- return 0
- }
- puts "diff: $dbdump $initdump gives:\n$ret"
- return 1
-}
-
-proc recd007_check { op sub dir dbfile subdb new env oflags } {
- #
- # No matter how many subdbs we have, dbtruncate will always
- # have a file, and if we open our particular db, it should
- # have no entries.
- #
- if { $sub == 0 } {
- if { $op == "dbremove" } {
- error_check_good $op:not-exist \
- [file exists $dir/$dbfile] 0
- } elseif { $op == "dbrename"} {
- error_check_good $op:exist \
- [file exists $dir/$dbfile] 0
- error_check_good $op:exist2 \
- [file exists $dir/$dbfile.new] 1
- } else {
- error_check_good $op:exist \
- [file exists $dir/$dbfile] 1
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set dbc [$db cursor]
- error_check_good dbc_open \
- [is_valid_cursor $dbc $db] TRUE
- set ret [$dbc get -first]
- error_check_good dbget1 [llength $ret] 0
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
- }
- return
- } else {
- set t1 $dir/t1
- #
- # If we have subdbs, check that all but the last one
- # are there, and the last one is correctly operated on.
- #
- set db [berkdb_open -rdonly -env $env $dbfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set c [eval {$db cursor}]
- error_check_good db_cursor [is_valid_cursor $c $db] TRUE
- set d [$c get -last]
- if { $op == "dbremove" } {
- if { $sub == 1 } {
- error_check_good subdb:rem [llength $d] 0
- } else {
- error_check_bad subdb:rem [llength $d] 0
- set sdb [lindex [lindex $d 0] 0]
- error_check_bad subdb:rem1 $sdb $subdb
- }
- } elseif { $op == "dbrename"} {
- set sdb [lindex [lindex $d 0] 0]
- error_check_good subdb:ren $sdb $new
- if { $sub != 1 } {
- set d [$c get -prev]
- error_check_bad subdb:ren [llength $d] 0
- set sdb [lindex [lindex $d 0] 0]
- error_check_good subdb:ren1 \
- [is_substr "new" $sdb] 0
- }
- } else {
- set sdb [lindex [lindex $d 0] 0]
- set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
- error_check_good db_open [is_valid_db $dbt] TRUE
- set dbc [$dbt cursor]
- error_check_good dbc_open \
- [is_valid_cursor $dbc $dbt] TRUE
- set ret [$dbc get -first]
- error_check_good dbget2 [llength $ret] 0
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$dbt close] 0
- if { $sub != 1 } {
- set d [$c get -prev]
- error_check_bad subdb:ren [llength $d] 0
- set sdb [lindex [lindex $d 0] 0]
- set dbt [berkdb_open -rdonly -env $env \
- $dbfile $sdb]
- error_check_good db_open [is_valid_db $dbt] TRUE
-				set dbc [$dbt cursor]
-				error_check_good dbc_open \
-				    [is_valid_cursor $dbc $dbt] TRUE
- set ret [$dbc get -first]
- error_check_bad dbget3 [llength $ret] 0
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$dbt close] 0
- }
- }
- error_check_good dbcclose [$c close] 0
- error_check_good db_close [$db close] 0
- }
-}
-
-proc copy_afterop { dir } {
- set r [catch { set filecopy [glob $dir/*.afterop] } res]
- if { $r == 1 } {
- return
- }
- foreach f $filecopy {
- set orig [string range $f 0 \
- [expr [string last "." $f] - 1]]
- catch { file rename -force $f $orig} res
- }
-}
diff --git a/storage/bdb/test/recd008.tcl b/storage/bdb/test/recd008.tcl
deleted file mode 100644
index b12c757854a..00000000000
--- a/storage/bdb/test/recd008.tcl
+++ /dev/null
@@ -1,226 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd008.tcl,v 1.30 2004/11/05 00:59:01 mjc Exp $
-#
-# TEST recd008
-# TEST Test deeply nested transactions and many-child transactions.
-proc recd008 { method {breadth 4} {depth 4} args} {
- global kvals
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Recd008: $method $breadth X $depth deeply nested transactions"
-
- # Create the database and environment.
- env_cleanup $testdir
-
- set dbfile recd008.db
-
- puts "\tRecd008.a: create database"
- set db [eval {berkdb_open -create} $args $omethod $testdir/$dbfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Make sure that we have enough entries to span a couple of
- # different pages.
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < 1000 } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- if { $count == 500} {
- set p1 $key
- set kvals($p1) $str
- }
- set ret [$db put $key [chop_data $method $str]]
- error_check_good put $ret 0
-
- incr count
- }
- close $did
- error_check_good db_close [$db close] 0
-
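-	# With the default breadth of 4 and depth of 4 this works out
-	# to pow(4,4) = 256 transactions; values under 20 are floored
-	# at 20 below.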
- set txn_max [expr int([expr pow($breadth,$depth)])]
- if { $txn_max < 20 } {
- set txn_max 20
- }
- puts "\tRecd008.b: create environment for $txn_max transactions"
-
- set lock_max 2500
- set eflags "-mode 0644 -create -lock_max $lock_max -txn_max $txn_max \
- -txn -home $testdir"
- set env_cmd "berkdb_env $eflags"
- set dbenv [eval $env_cmd]
- error_check_good env_open [is_valid_env $dbenv] TRUE
-
- reset_env $dbenv
-
- set rlist {
- { {recd008_parent abort ENV DB $method $p1 TXNID 1 1 $breadth $depth}
- "Recd008.c: child abort parent" }
- { {recd008_parent commit ENV DB $method $p1 TXNID 1 1 $breadth $depth}
- "Recd008.d: child commit parent" }
- }
- foreach pair $rlist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- op_recover abort $testdir $env_cmd $dbfile $cmd $msg
- recd008_setkval $dbfile $p1
- op_recover commit $testdir $env_cmd $dbfile $cmd $msg
- recd008_setkval $dbfile $p1
- }
-
- puts "\tRecd008.e: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-}
-
-proc recd008_setkval { dbfile p1 } {
- global kvals
- source ./include.tcl
-
- set db [berkdb_open $testdir/$dbfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set ret [$db get $p1]
- error_check_good dbclose [$db close] 0
- set kvals($p1) [lindex [lindex $ret 0] 1]
-}
-
-# This is a lot like the op_recover procedure. We cannot use that
-# because it was not meant to be called recursively. This proc
-# knows about depth/breadth and file naming so that recursive calls
-# don't overwrite various initial and afterop files, etc.
-#
-# The basic flow of this is:
-# (Initial file)
-# Parent begin transaction (in op_recover)
-# Parent starts children
-# Recursively call recd008_recover
-# (children modify p1)
-# Parent modifies p1
-# (Afterop file)
-# Parent commit/abort (in op_recover)
-# (Final file)
-# Recovery test (in op_recover)
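-#
-# Each child appends a tag of the form $b:$d:$txnhandle to the prior
-# value of p1, so the final value of p1 records which transactions
-# committed.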
-proc recd008_parent { op env db method p1key parent b0 d0 breadth depth } {
- global kvals
- source ./include.tcl
-
- #
- # Save copy of original data
- # Acquire lock on data
- #
- set olddata [pad_data $method $kvals($p1key)]
- set ret [$db get -rmw -txn $parent $p1key]
- set Dret [lindex [lindex $ret 0] 1]
- error_check_good get_parent_RMW $Dret $olddata
-
- #
- # Parent spawns off children
- #
- set ret [recd008_txn $op $env $db $method $p1key $parent \
- $b0 $d0 $breadth $depth]
-
- puts "Child runs complete. Parent modifies data."
-
- #
- # Parent modifies p1
- #
- set newdata $olddata.parent
- set ret [$db put -txn $parent $p1key [chop_data $method $newdata]]
- error_check_good db_put $ret 0
-
- #
- # Save value in kvals for later comparison
- #
- switch $op {
- "commit" {
- set kvals($p1key) $newdata
- }
- "abort" {
- set kvals($p1key) $olddata
- }
- }
- return 0
-}
-
-proc recd008_txn { op env db method p1key parent b0 d0 breadth depth } {
- global log_log_record_types
- global kvals
- source ./include.tcl
-
- for {set d 1} {$d < $d0} {incr d} {
- puts -nonewline "\t"
- }
- puts "Recd008_txn: $op parent:$parent $breadth $depth ($b0 $d0)"
-
- # Save the initial file and open the environment and the file
- for {set b $b0} {$b <= $breadth} {incr b} {
- #
- # Begin child transaction
- #
- set t [$env txn -parent $parent]
- error_check_bad txn_begin $t NULL
- error_check_good txn_begin [is_valid_txn $t $env] TRUE
- set startd [expr $d0 + 1]
- set child $b:$startd:$t
- set olddata [pad_data $method $kvals($p1key)]
- set newdata $olddata.$child
- set ret [$db get -rmw -txn $t $p1key]
- set Dret [lindex [lindex $ret 0] 1]
- error_check_good get_parent_RMW $Dret $olddata
-
- #
- # Recursively call to set up nested transactions/children
- #
- for {set d $startd} {$d <= $depth} {incr d} {
- set ret [recd008_txn commit $env $db $method $p1key $t \
- $b $d $breadth $depth]
- set ret [recd008_txn abort $env $db $method $p1key $t \
- $b $d $breadth $depth]
- }
- #
- # Modifies p1.
- #
- set ret [$db put -txn $t $p1key [chop_data $method $newdata]]
- error_check_good db_put $ret 0
-
- #
- # Commit or abort
- #
- for {set d 1} {$d < $startd} {incr d} {
- puts -nonewline "\t"
- }
- puts "Executing txn_$op:$t"
- error_check_good txn_$op:$t [$t $op] 0
- for {set d 1} {$d < $startd} {incr d} {
- puts -nonewline "\t"
- }
- set ret [$db get -rmw -txn $parent $p1key]
- set Dret [lindex [lindex $ret 0] 1]
- set newdata [pad_data $method $newdata]
- switch $op {
- "commit" {
- puts "Command executed and committed."
- error_check_good get_parent_RMW $Dret $newdata
- set kvals($p1key) $newdata
- }
- "abort" {
- puts "Command executed and aborted."
- error_check_good get_parent_RMW $Dret $olddata
- set kvals($p1key) $olddata
- }
- }
- }
- return 0
-}
diff --git a/storage/bdb/test/recd009.tcl b/storage/bdb/test/recd009.tcl
deleted file mode 100644
index d6700a0bd54..00000000000
--- a/storage/bdb/test/recd009.tcl
+++ /dev/null
@@ -1,180 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd009.tcl,v 1.20 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd009
-# TEST Verify record numbering across split/reverse splits and recovery.
-proc recd009 { method {select 0} args} {
- global fixed_len
- source ./include.tcl
-
- if { [is_rbtree $method] != 1 && [is_rrecno $method] != 1} {
- puts "Recd009 skipping for method $method."
- return
- }
-
- set opts [convert_args $method $args]
- set method [convert_method $method]
-
- puts "\tRecd009: Test record numbers across splits and recovery"
-
- set testfile recd009.db
- env_cleanup $testdir
- set mkeys 1000
- set nkeys 5
- set data "data"
-
- puts "\tRecd009.a: Create $method environment and database."
- set flags "-create -txn -home $testdir"
-
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Fill page with small key/data pairs. Keep at leaf.
- puts "\tRecd009.b: Fill page with $nkeys small key/data pairs."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { [is_recno $method] == 1 } {
- set key $i
- } else {
- set key key000$i
- }
- set ret [$db put $key $data$i]
- error_check_good dbput $ret 0
- }
- error_check_good db_close [$db close] 0
- error_check_good env_close [$dbenv close] 0
-
- set newnkeys [expr $nkeys + 1]
- # List of recovery tests: {CMD MSG} pairs.
- set rlist {
- { {recd009_split DB TXNID 1 $method $newnkeys $mkeys}
- "Recd009.c: split"}
- { {recd009_split DB TXNID 0 $method $newnkeys $mkeys}
- "Recd009.d: reverse split"}
- }
-
- foreach pair $rlist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
- }
- set reverse [string first "reverse" $msg]
- if { $reverse == -1 } {
- set abortkeys $nkeys
- set commitkeys $mkeys
- set abortpg 0
- set commitpg 1
- } else {
- set abortkeys $mkeys
- set commitkeys $nkeys
- set abortpg 1
- set commitpg 0
- }
- op_recover abort $testdir $env_cmd $testfile $cmd $msg
- recd009_recnocheck $testdir $testfile $opts $abortkeys $abortpg
- op_recover commit $testdir $env_cmd $testfile $cmd $msg
- recd009_recnocheck $testdir $testfile $opts \
- $commitkeys $commitpg
- }
- puts "\tRecd009.e: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-}
-
-#
-# This procedure verifies that the database has only numkeys number
-# of keys and that they are in order.
-#
-proc recd009_recnocheck { tdir testfile opts numkeys numpg} {
- source ./include.tcl
-
- set db [eval {berkdb_open} $opts $tdir/$testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\tRecd009_recnocheck: Verify page count of $numpg on split."
- set stat [$db stat]
- error_check_bad stat:check-split [is_substr $stat \
- "{{Internal pages} 0}"] $numpg
-
- set type [$db get_type]
- set dbc [$db cursor]
- error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
- set i 1
- puts "\tRecd009_recnocheck: Checking $numkeys record numbers."
- for {set d [$dbc get -first]} { [llength $d] != 0 } {
- set d [$dbc get -next]} {
- if { [is_btree $type] } {
- set thisi [$dbc get -get_recno]
- } else {
- set thisi [lindex [lindex $d 0] 0]
- }
- error_check_good recno_check $i $thisi
- error_check_good record_count [expr $i <= $numkeys] 1
- incr i
- }
- error_check_good curs_close [$dbc close] 0
- error_check_good db_close [$db close] 0
-}
-
-proc recd009_split { db txn split method nkeys mkeys } {
- global errorCode
- source ./include.tcl
-
- set data "data"
-
- set isrecno [is_recno $method]
- # if mkeys is above 1000, need to adjust below for lexical order
- if { $split == 1 } {
- puts "\tRecd009_split: Add $mkeys pairs to force split."
- for {set i $nkeys} { $i <= $mkeys } { incr i } {
- if { $isrecno == 1 } {
- set key $i
- } else {
- if { $i >= 100 } {
- set key key0$i
- } elseif { $i >= 10 } {
- set key key00$i
- } else {
- set key key000$i
- }
- }
- set ret [$db put -txn $txn $key $data$i]
- error_check_good dbput:more $ret 0
- }
- } else {
- puts "\tRecd009_split: Delete added keys to force reverse split."
- # Since rrecno renumbers, we delete downward.
- for {set i $mkeys} { $i >= $nkeys } { set i [expr $i - 1] } {
- if { $isrecno == 1 } {
- set key $i
- } else {
- if { $i >= 100 } {
- set key key0$i
- } elseif { $i >= 10 } {
- set key key00$i
- } else {
- set key key000$i
- }
- }
- error_check_good db_del:$i [$db del -txn $txn $key] 0
- }
- }
- return 0
-}
diff --git a/storage/bdb/test/recd010.tcl b/storage/bdb/test/recd010.tcl
deleted file mode 100644
index a2df7a47c6f..00000000000
--- a/storage/bdb/test/recd010.tcl
+++ /dev/null
@@ -1,257 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd010.tcl,v 1.21 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd010
-# TEST Test stability of btree duplicates across btree off-page dup splits
-# TEST and reverse splits and across recovery.
-proc recd010 { method {select 0} args} {
- if { [is_btree $method] != 1 } {
- puts "Recd010 skipping for method $method."
- return
- }
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Recd010: skipping for specific pagesizes"
- return
- }
- set largs $args
- append largs " -dup "
- recd010_main $method $select $largs
- append largs " -dupsort "
- recd010_main $method $select $largs
-}
-
-proc recd010_main { method select largs } {
- global fixed_len
- global kvals
- global kvals_dups
- source ./include.tcl
-
- set opts [convert_args $method $largs]
- set method [convert_method $method]
-
- puts "Recd010 ($opts): Test duplicates across splits and recovery"
-
- set testfile recd010.db
- env_cleanup $testdir
- #
- # Set pagesize small to generate lots of off-page dups
- #
- set page 512
- set mkeys 1000
- set firstkeys 5
- set data "data"
- set key "recd010_key"
-
- puts "\tRecd010.a: Create environment and database."
- set flags "-create -txn -home $testdir"
-
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- set oflags "-env $dbenv -create -mode 0644 $opts $method"
- set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Fill page with small key/data pairs. Keep at leaf.
- puts "\tRecd010.b: Fill page with $firstkeys small dups."
- for { set i 1 } { $i <= $firstkeys } { incr i } {
- set ret [$db put $key $data$i]
- error_check_good dbput $ret 0
- }
- set kvals 1
- set kvals_dups $firstkeys
- error_check_good db_close [$db close] 0
- error_check_good env_close [$dbenv close] 0
-
- # List of recovery tests: {CMD MSG} pairs.
- if { $mkeys < 100 } {
- puts "Recd010 mkeys of $mkeys too small"
- return
- }
- set rlist {
- { {recd010_split DB TXNID 1 2 $mkeys}
- "Recd010.c: btree split 2 large dups"}
- { {recd010_split DB TXNID 0 2 $mkeys}
- "Recd010.d: btree reverse split 2 large dups"}
- { {recd010_split DB TXNID 1 10 $mkeys}
- "Recd010.e: btree split 10 dups"}
- { {recd010_split DB TXNID 0 10 $mkeys}
- "Recd010.f: btree reverse split 10 dups"}
- { {recd010_split DB TXNID 1 100 $mkeys}
- "Recd010.g: btree split 100 dups"}
- { {recd010_split DB TXNID 0 100 $mkeys}
- "Recd010.h: btree reverse split 100 dups"}
- }
-
- foreach pair $rlist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
- }
- set reverse [string first "reverse" $msg]
- op_recover abort $testdir $env_cmd $testfile $cmd $msg
- recd010_check $testdir $testfile $opts abort $reverse $firstkeys
- op_recover commit $testdir $env_cmd $testfile $cmd $msg
- recd010_check $testdir $testfile $opts commit $reverse $firstkeys
- }
- puts "\tRecd010.i: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-}
-
-#
-# This procedure verifies that the database has only numkeys number
-# of keys and that they are in order.
-#
-proc recd010_check { tdir testfile opts op reverse origdups } {
- global kvals
- global kvals_dups
- source ./include.tcl
-
- set db [eval {berkdb_open} $opts $tdir/$testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set data "data"
-
- if { $reverse == -1 } {
- puts "\tRecd010_check: Verify split after $op"
- } else {
- puts "\tRecd010_check: Verify reverse split after $op"
- }
-
- set stat [$db stat]
- if { [expr ([string compare $op "abort"] == 0 && $reverse == -1) || \
- ([string compare $op "commit"] == 0 && $reverse != -1)]} {
- set numkeys 0
- set allkeys [expr $numkeys + 1]
- set numdups $origdups
- #
- # If we abort the adding of dups, or commit
- # the removal of dups, either way check that
- # we are back at the beginning. Check that:
- # - We have 0 internal pages.
- # - We have only 1 key (the original we primed the db
- # with at the beginning of the test).
- # - We have only the original number of dups we primed
- # the db with at the beginning of the test.
- #
- error_check_good stat:orig0 [is_substr $stat \
- "{{Internal pages} 0}"] 1
- error_check_good stat:orig1 [is_substr $stat \
- "{{Number of keys} 1}"] 1
- error_check_good stat:orig2 [is_substr $stat \
- "{{Number of records} $origdups}"] 1
- } else {
- set numkeys $kvals
- set allkeys [expr $numkeys + 1]
- set numdups $kvals_dups
- #
- # If we abort the removal of dups, or commit the
- # addition of dups, check that:
- # - We have > 0 internal pages.
- # - We have the number of keys.
- #
- error_check_bad stat:new0 [is_substr $stat \
- "{{Internal pages} 0}"] 1
- error_check_good stat:new1 [is_substr $stat \
- "{{Number of keys} $allkeys}"] 1
- }
-
- set dbc [$db cursor]
- error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
- puts "\tRecd010_check: Checking key and duplicate values"
- set key "recd010_key"
- #
- # Check dups are there as they should be.
- #
- for {set ki 0} {$ki < $numkeys} {incr ki} {
- set datacnt 0
- for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
- set d [$dbc get -nextdup]} {
- set thisdata [lindex [lindex $d 0] 1]
- if { $datacnt < 10 } {
- set pdata $data.$ki.00$datacnt
- } elseif { $datacnt < 100 } {
- set pdata $data.$ki.0$datacnt
- } else {
- set pdata $data.$ki.$datacnt
- }
- error_check_good dup_check $thisdata $pdata
- incr datacnt
- }
- error_check_good dup_count $datacnt $numdups
- }
- #
- # Check that the number of expected keys (allkeys) are
- # all of the ones that exist in the database.
- #
- set dupkeys 0
- set lastkey ""
- for {set d [$dbc get -first]} { [llength $d] != 0 } {
- set d [$dbc get -next]} {
- set thiskey [lindex [lindex $d 0] 0]
- if { [string compare $lastkey $thiskey] != 0 } {
- incr dupkeys
- }
- set lastkey $thiskey
- }
- error_check_good key_check $allkeys $dupkeys
- error_check_good curs_close [$dbc close] 0
- error_check_good db_close [$db close] 0
-}
-
-proc recd010_split { db txn split nkeys mkeys } {
- global errorCode
- global kvals
- global kvals_dups
- source ./include.tcl
-
- set data "data"
- set key "recd010_key"
-
- set numdups [expr $mkeys / $nkeys]
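-	# e.g. with mkeys 1000: nkeys 2 -> 500 dups per key,
-	# nkeys 10 -> 100 dups, nkeys 100 -> 10 dups.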
-
- set kvals $nkeys
- set kvals_dups $numdups
- if { $split == 1 } {
- puts \
-"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
- for {set k 0} { $k < $nkeys } { incr k } {
- for {set i 0} { $i < $numdups } { incr i } {
- if { $i < 10 } {
- set pdata $data.$k.00$i
- } elseif { $i < 100 } {
- set pdata $data.$k.0$i
- } else {
- set pdata $data.$k.$i
- }
- set ret [$db put -txn $txn $key$k $pdata]
- error_check_good dbput:more $ret 0
- }
- }
- } else {
- puts \
-"\tRecd010_split: Delete $nkeys keys to force reverse split."
- for {set k 0} { $k < $nkeys } { incr k } {
- error_check_good db_del:$k [$db del -txn $txn $key$k] 0
- }
- }
- return 0
-}
diff --git a/storage/bdb/test/recd011.tcl b/storage/bdb/test/recd011.tcl
deleted file mode 100644
index bf118905b98..00000000000
--- a/storage/bdb/test/recd011.tcl
+++ /dev/null
@@ -1,136 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd011.tcl,v 11.26 2004/01/28 03:36:28 bostic Exp $
-#
-# TEST recd011
-# TEST Verify that recovery to a specific timestamp works.
-proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
- source ./include.tcl
- global rand_init
- berkdb srand $rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- set tnum "011"
-
- puts "Recd$tnum ($method $args): Test recovery to a specific timestamp."
-
- set testfile recd$tnum.db
- env_cleanup $testdir
-
- set i 0
- if { [is_record_based $method] == 1 } {
- set key 1
- set bigkey 1001
- } else {
- set key KEY
- set bigkey BIGKEY
- }
-
- puts "\tRecd$tnum.a: Create environment and database."
- set bufsize [expr 8 * 1024]
- set maxsize [expr 8 * $bufsize]
- set flags "-create -txn -home $testdir -log_buffer $bufsize \
- -log_max $maxsize"
-
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Main loop: every second or so, increment the db in a txn.
- puts "\t\tInitial Checkpoint"
- error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
-
- puts "\tRecd$tnum.b ($niter iterations):\
- Transaction-protected increment loop."
- for { set i 0 } { $i <= $niter } { incr i } {
- set str [random_data 4096 0 NOTHING]
- set data $i
- set bigdata $i$str
-
- # Put, in a txn.
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
- error_check_good db_put \
- [$db put -txn $txn $key [chop_data $method $data]] 0
- error_check_good db_put \
- [$db put -txn $txn $bigkey [chop_data $method $bigdata]] 0
- error_check_good txn_commit [$txn commit] 0
-
- # We need to sleep before taking the timestamp to guarantee
- # that the timestamp is *after* this transaction commits.
- # Since the resolution of the system call used by Berkeley DB
- # is less than a second, rounding to the nearest second can
- # otherwise cause off-by-one errors in the test.
- tclsleep $sleep_time
-
- set timeof($i) [timestamp -r]
-
- # If an appropriate period has elapsed, checkpoint.
- if { $i % $ckpt_freq == $ckpt_freq - 1 } {
- puts "\t\tIteration $i: Checkpointing."
- error_check_good ckpt($i) [$dbenv txn_checkpoint] 0
- }
-
- # Sleep again to ensure that the next operation definitely
- # occurs after the timestamp.
- tclsleep $sleep_time
- }
- error_check_good db_close [$db close] 0
- error_check_good env_close [$dbenv close] 0
-
- # Now, loop through and recover to each timestamp, verifying the
- # expected increment.
- puts "\tRecd$tnum.c: Recover to each timestamp and check."
- for { set i $niter } { $i >= 0 } { incr i -1 } {
-
- # Run db_recover.
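-		# db_recover -t takes a time in [[CC]YY]MMDDhhmm[.SS]
-		# form (as for touch(1)), hence this clock format.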
- set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
- # puts $t
- berkdb debug_check
- set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
- error_check_good db_recover($i,$t,$r) $ret 0
-
- # Now open the db and check the timestamp.
- set db [eval {berkdb_open} $testdir/$testfile]
- error_check_good db_open($i) [is_valid_db $db] TRUE
-
- set dbt [$db get $key]
- set datum [lindex [lindex $dbt 0] 1]
- error_check_good timestamp_recover $datum [pad_data $method $i]
-
- error_check_good db_close [$db close] 0
- }
-
- # Finally, recover to a time well before the first timestamp
- # and well after the last timestamp. The latter should
- # be just like the timestamp of the last test performed;
- # the former should fail.
- puts "\tRecd$tnum.d: Recover to before the first timestamp."
- set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
- set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
- error_check_bad db_recover(before,$t) $ret 0
-
- puts "\tRecd$tnum.e: Recover to after the last timestamp."
- set t [clock format \
- [expr $timeof($niter) + 1000] -format "%y%m%d%H%M.%S"]
- set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
- error_check_good db_recover(after,$t) $ret 0
-
- # Now open the db and check the timestamp.
- set db [eval {berkdb_open} $testdir/$testfile]
- error_check_good db_open(after) [is_valid_db $db] TRUE
-
- set dbt [$db get $key]
- set datum2 [lindex [lindex $dbt 0] 1]
-
- error_check_good timestamp_recover $datum2 $datum
- error_check_good db_close [$db close] 0
-}
diff --git a/storage/bdb/test/recd012.tcl b/storage/bdb/test/recd012.tcl
deleted file mode 100644
index abed99de809..00000000000
--- a/storage/bdb/test/recd012.tcl
+++ /dev/null
@@ -1,433 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd012.tcl,v 11.31 2004/04/19 14:56:13 bostic Exp $
-#
-# TEST recd012
-# TEST Test of log file ID management. [#2288]
-# TEST Test recovery handling of file opens and closes.
-proc recd012 { method {start 0} \
- {niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
- source ./include.tcl
-
- set tnum "012"
- set pagesize 512
-
- if { $is_qnx_test } {
- set niter 40
- }
-
- puts "Recd$tnum $method ($args): Test recovery file management."
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Recd012: skipping for specific pagesizes"
- return
- }
-
- for { set i $start } { $i <= $niter } { incr i } {
- env_cleanup $testdir
-
- # For repeatability, we pass in the iteration number
- # as a parameter and use that in recd012_body to seed
- # the random number generator to randomize our operations.
- # This lets us re-run a potentially failing iteration
- # without having to start from the beginning and work
- # our way to it.
- #
-		# The number of databases ranges from 4 to 8 and is
-		# a function of the iteration number $i:
- # set ndbs [expr ($i % 5) + 4]
-
- recd012_body \
- $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
- }
-}
-
-proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} } {
- global alphabet rand_init fixed_len recd012_ofkey recd012_ofckptkey
- source ./include.tcl
-
- set largs [convert_args $method $largs]
- set omethod [convert_method $method]
-
- puts "\tRecd$tnum $method ($largs): Iteration $iter"
- puts "\t\tRecd$tnum.a: Create environment and $ndbs databases."
-
- # We run out of lockers during some of the recovery runs, so
- # we need to make sure that we specify a DB_CONFIG that will
- # give us enough lockers.
- set f [open $testdir/DB_CONFIG w]
- puts $f "set_lk_max_lockers 5000"
- close $f
-
- set flags "-create -txn -home $testdir"
- set env_cmd "berkdb_env $flags"
- error_check_good env_remove [berkdb envremove -home $testdir] 0
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- # Initialize random number generator based on $iter.
- berkdb srand [expr $iter + $rand_init]
-
- # Initialize database that keeps track of number of open files (so
- # we don't run out of descriptors).
- set ofname of.db
- set txn [$dbenv txn]
- error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE
- set ofdb [berkdb_open -env $dbenv -txn $txn\
- -create -dup -mode 0644 -btree -pagesize 512 $ofname]
- error_check_good of_open [is_valid_db $ofdb] TRUE
- error_check_good open_txn_commit [$txn commit] 0
- set oftxn [$dbenv txn]
- error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
- error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
- error_check_good of_put2 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
- error_check_good of_put3 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
- error_check_good of_txn_commit [$oftxn commit] 0
- error_check_good of_close [$ofdb close] 0
-
- # Create ndbs databases to work in, and a file listing db names to
- # pick from.
- set f [open $testdir/dblist w]
-
- set oflags "-auto_commit -env $dbenv \
- -create -mode 0644 -pagesize $psz $largs $omethod"
- for { set i 0 } { $i < $ndbs } { incr i } {
- # 50-50 chance of being a subdb, unless we're a queue.
- if { [berkdb random_int 0 1] || [is_queue $method] } {
- # not a subdb
- set dbname recd$tnum-$i.db
- } else {
- # subdb
- set dbname "recd$tnum-subdb.db s$i"
- }
- puts $f $dbname
- set db [eval berkdb_open $oflags $dbname]
- error_check_good db($i) [is_valid_db $db] TRUE
- error_check_good db($i)_close [$db close] 0
- }
- close $f
- error_check_good env_close [$dbenv close] 0
-
- # Now we get to the meat of things. Our goal is to do some number
- # of opens, closes, updates, and shutdowns (simulated here by a
- # close of all open handles and a close/reopen of the environment,
- # with or without an envremove), matching the regular expression
- #
- # ((O[OUC]+S)+R+V)
- #
- # We'll repeat the inner + a random number up to $niniter times,
- # and the outer + a random number up to $noutiter times.
- #
- # In order to simulate shutdowns, we'll perform the opens, closes,
- # and updates in a separate process, which we'll exit without closing
- # all handles properly. The environment will be left lying around
- # before we run recovery 50% of the time.
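-	#
-	# For example, one outer iteration might be
-	#	O U U C S   O U S   R R   V
-	# i.e. two simulated crashes, two recovery passes, then a verify.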
- set out [berkdb random_int 1 $noutiter]
- puts \
- "\t\tRecd$tnum.b: Performing $out recoveries of up to $niniter ops."
- for { set i 0 } { $i < $out } { incr i } {
- set child [open "|$tclsh_path" w]
-
- # For performance, don't source everything,
- # just what we'll need.
- puts $child "load $tcllib"
- puts $child "set fixed_len $fixed_len"
- puts $child "source $src_root/test/testutils.tcl"
- puts $child "source $src_root/test/recd$tnum.tcl"
-
- set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
-
- # Go.
- berkdb debug_check
- puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
- $ndbs $tnum $method $ofname $largs"
- close $child
-
- # Run recovery 0-3 times.
- set nrecs [berkdb random_int 0 3]
- for { set j 0 } { $j < $nrecs } { incr j } {
- berkdb debug_check
- set ret [catch {exec $util_path/db_recover \
- -h $testdir} res]
- if { $ret != 0 } {
- puts "FAIL: db_recover returned with nonzero\
- exit status, output as follows:"
- file mkdir /tmp/12out
- set fd [open /tmp/12out/[pid] w]
- puts $fd $res
- close $fd
- }
- error_check_good recover($j) $ret 0
- }
- }
-
- # Run recovery one final time; it doesn't make sense to
- # check integrity if we do not.
- set ret [catch {exec $util_path/db_recover -h $testdir} res]
- if { $ret != 0 } {
- puts "FAIL: db_recover returned with nonzero\
- exit status, output as follows:"
- puts $res
- }
-
- # Make sure each datum is the correct filename.
- puts "\t\tRecd$tnum.c: Checking data integrity."
- set dbenv [berkdb_env -create -private -home $testdir]
- error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
- set f [open $testdir/dblist r]
- set i 0
- while { [gets $f dbinfo] > 0 } {
- set db [eval berkdb_open -env $dbenv $dbinfo]
- error_check_good dbopen($dbinfo) [is_valid_db $db] TRUE
-
- set dbc [$db cursor]
- error_check_good cursor [is_valid_cursor $dbc $db] TRUE
-
- for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
- { set dbt [$dbc get -next] } {
- error_check_good integrity [lindex [lindex $dbt 0] 1] \
- [pad_data $method $dbinfo]
- }
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
- }
- close $f
- error_check_good env_close_integrity [$dbenv close] 0
-
- # Verify
- error_check_good verify \
- [verify_dir $testdir "\t\tRecd$tnum.d: " 0 0 1] 0
-}
-
-proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
- ofname args } {
- global recd012_ofkey
- source ./include.tcl
- if { [is_record_based $method] } {
- set keybase ""
- } else {
- set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
- }
-
- # Initialize our random number generator, repeatably based on an arg.
- berkdb srand $rnd
-
- # Open our env.
- set dbenv [eval $env_cmd]
- error_check_good env_open [is_valid_env $dbenv] TRUE
-
- # Find out how many databases appear to be open in the log--we
- # don't want recovery to run out of filehandles.
- set txn [$dbenv txn]
- error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE
- set ofdb [berkdb_open -env $dbenv -txn $txn $ofname]
- error_check_good child_txn_commit [$txn commit] 0
-
- set oftxn [$dbenv txn]
- error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
- set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
- error_check_good of_get [lindex [lindex $dbt 0] 0] $recd012_ofkey
- set nopenfiles [lindex [lindex $dbt 0] 1]
-
- error_check_good of_commit [$oftxn commit] 0
-
- # Read our dbnames
- set f [open $testdir/dblist r]
- set i 0
- while { [gets $f dbname($i)] > 0 } {
- incr i
- }
- close $f
-
- # We now have $ndbs extant databases.
- # Open one of them, just to get us started.
- set opendbs {}
- set oflags "-env $dbenv $args"
-
- # Start a transaction, just to get us started.
- set curtxn [$dbenv txn]
- error_check_good txn [is_valid_txn $curtxn $dbenv] TRUE
-
- # Inner loop. Do $in iterations of a random open, close, or
- # update, where $in is between 1 and $niniter.
- set in [berkdb random_int 1 $niniter]
- for { set j 0 } { $j < $in } { incr j } {
- set op [berkdb random_int 0 2]
- switch $op {
- 0 {
- # Open.
- recd012_open
- }
- 1 {
- # Update. Put random-number$keybase as key,
- # filename as data, into random database.
- set num_open [llength $opendbs]
- if { $num_open == 0 } {
- # If none are open, do an open first.
- recd012_open
- set num_open [llength $opendbs]
- }
- set n [berkdb random_int 0 [expr $num_open - 1]]
- set pair [lindex $opendbs $n]
- set udb [lindex $pair 0]
- set uname [lindex $pair 1]
-
- set key [berkdb random_int 1000 1999]$keybase
- set data [chop_data $method $uname]
- error_check_good put($uname,$udb,$key,$data) \
- [$udb put -txn $curtxn $key $data] 0
-
-			# One time in four, commit the transaction
-			# (currently disabled by the trailing "&& 0").
- if { [berkdb random_int 0 3] == 0 && 0 } {
- error_check_good txn_recommit \
- [$curtxn commit] 0
- set curtxn [$dbenv txn]
- error_check_good txn_reopen \
- [is_valid_txn $curtxn $dbenv] TRUE
- }
- }
- 2 {
- # Close.
- if { [llength $opendbs] == 0 } {
- # If none are open, open instead of closing.
- recd012_open
- continue
- }
-
- # Commit curtxn first, lest we self-deadlock.
- error_check_good txn_recommit [$curtxn commit] 0
-
- # Do it.
- set which [berkdb random_int 0 \
- [expr [llength $opendbs] - 1]]
-
- set db [lindex [lindex $opendbs $which] 0]
- error_check_good db_choice [is_valid_db $db] TRUE
- global errorCode errorInfo
-
- error_check_good db_close \
- [[lindex [lindex $opendbs $which] 0] close] 0
-
- set opendbs [lreplace $opendbs $which $which]
- incr nopenfiles -1
-
- # Reopen txn.
- set curtxn [$dbenv txn]
- error_check_good txn_reopen \
- [is_valid_txn $curtxn $dbenv] TRUE
- }
- }
-
- # One time in two hundred, checkpoint.
- if { [berkdb random_int 0 199] == 0 } {
- puts "\t\t\tRecd$tnum:\
- Random checkpoint after operation $outiter.$j."
- error_check_good txn_ckpt \
- [$dbenv txn_checkpoint] 0
- set nopenfiles \
- [recd012_nopenfiles_ckpt $dbenv $ofdb $nopenfiles]
- }
- }
-
- # We have to commit curtxn. It'd be kind of nice not to, but
- # if we start in again without running recovery, we may block
- # ourselves.
- error_check_good curtxn_commit [$curtxn commit] 0
-
- # Put back the new number of open files.
- set oftxn [$dbenv txn]
- error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
- error_check_good of_del [$ofdb del -txn $oftxn $recd012_ofkey] 0
- error_check_good of_put \
- [$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
- error_check_good of_commit [$oftxn commit] 0
- error_check_good ofdb_close [$ofdb close] 0
-}
-
-proc recd012_open { } {
- # This is basically an inline and has to modify curtxn,
- # so use upvars.
- upvar curtxn curtxn
- upvar ndbs ndbs
- upvar dbname dbname
- upvar dbenv dbenv
- upvar oflags oflags
- upvar opendbs opendbs
- upvar nopenfiles nopenfiles
-
- # Return without an open if we've already opened too many files--
- # we don't want to make recovery run out of filehandles.
- if { $nopenfiles > 30 } {
- #puts "skipping--too many open files"
- return -code break
- }
-
- # Commit curtxn first, lest we self-deadlock.
- error_check_good txn_recommit \
- [$curtxn commit] 0
-
- # Do it.
- set which [berkdb random_int 0 [expr $ndbs - 1]]
-
- set db [eval berkdb_open -auto_commit $oflags $dbname($which)]
-
- lappend opendbs [list $db $dbname($which)]
-
- # Reopen txn.
- set curtxn [$dbenv txn]
- error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE
-
- incr nopenfiles
-}
-
-# Update the database containing the number of files that db_recover has
-# to contend with--we want to avoid letting it run out of file descriptors.
-# We do this by keeping track of the number of unclosed opens since the
-# checkpoint before last.
-# $recd012_ofkey stores this current value; the two dups stored
-# at $recd012_ofckptkey hold the open counts recorded at each of the
-# previous two checkpoints.
-# Thus, if the current value is 17 when we do a checkpoint, and the
-# stored values are 3 and 8, the new current value (which we return)
-# is 14, and the new stored values are 8 and 6.
-proc recd012_nopenfiles_ckpt { env db nopenfiles } {
- global recd012_ofckptkey
- set txn [$env txn]
- error_check_good nopenfiles_ckpt_txn [is_valid_txn $txn $env] TRUE
-
- set dbc [$db cursor -txn $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- # Get the first ckpt value and delete it.
- set dbt [$dbc get -set $recd012_ofckptkey]
- error_check_good set [llength $dbt] 1
-
- set discard [lindex [lindex $dbt 0] 1]
- error_check_good del [$dbc del] 0
-
- set nopenfiles [expr $nopenfiles - $discard]
-
- # Get the next ckpt value
- set dbt [$dbc get -nextdup]
- error_check_good set2 [llength $dbt] 1
-
-	# Calculate how many opens we've had since the last checkpoint.
- set onlast [lindex [lindex $dbt 0] 1]
- set sincelast [expr $nopenfiles - $onlast]
-
- # Put this new number at the end of the dup set.
- error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
-
- # We should never deadlock since we're the only one in this db.
- error_check_good dbc_close [$dbc close] 0
- error_check_good txn_commit [$txn commit] 0
-
- return $nopenfiles
-}
-
-# globals -- it's not worth passing these around, as they're constants
-set recd012_ofkey OPENFILES
-set recd012_ofckptkey CKPTS
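
For reference, the dup-shuffling arithmetic in recd012_nopenfiles_ckpt can be
modeled in plain Tcl with no Berkeley DB calls. A minimal sketch (the proc name
nopenfiles_ckpt_model is hypothetical, not part of the suite) reproducing the
17 / {3 8} -> 14 / {8 6} example from the comment above:

    # Model the open-file accounting: nopenfiles is the current count;
    # ckptvals plays the role of the two dups stored at CKPTS.
    proc nopenfiles_ckpt_model { nopenfiles ckptvalsvar } {
        upvar $ckptvalsvar ckptvals
        # Discard the count recorded at the checkpoint before last.
        set discard [lindex $ckptvals 0]
        set nopenfiles [expr {$nopenfiles - $discard}]
        # Opens since the last checkpoint.
        set onlast [lindex $ckptvals 1]
        set sincelast [expr {$nopenfiles - $onlast}]
        # Shift the dup set: drop the oldest count, append the newest.
        set ckptvals [list $onlast $sincelast]
        return $nopenfiles
    }

    set vals {3 8}
    puts [nopenfiles_ckpt_model 17 vals]    ;# => 14
    puts $vals                              ;# => 8 6
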
diff --git a/storage/bdb/test/recd013.tcl b/storage/bdb/test/recd013.tcl
deleted file mode 100644
index 36cad9eb497..00000000000
--- a/storage/bdb/test/recd013.tcl
+++ /dev/null
@@ -1,285 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd013.tcl,v 11.22 2004/09/20 17:06:15 sue Exp $
-#
-# TEST recd013
-# TEST Test of cursor adjustment on child transaction aborts. [#2373]
-#
-# XXX
-# Other tests that cover more specific variants of the same issue
-# are in the access method tests for now. This is probably wrong; we
-# put this one here because they're closely based on and intertwined
-# with other, non-transactional cursor stability tests that are among
-# the access method tests, and because we need at least one test to
-# fit under recd and keep logtrack from complaining. We'll sort out the mess
-# later; the important thing, for now, is that everything that needs
-# testing gets tested. (This really shouldn't be under recd at all,
-# since it doesn't run recovery!)
-proc recd013 { method { nitems 100 } args } {
- source ./include.tcl
- global alphabet log_log_record_types
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- set tnum "013"
- set pgsz 512
-
- puts "Recd$tnum $method ($args): Test of aborted cursor adjustments."
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Recd013: skipping for specific pagesizes"
- return
- }
-
- set testfile recd$tnum.db
- env_cleanup $testdir
-
- set i 0
- if { [is_record_based $method] == 1 } {
- set keybase ""
- } else {
- set keybase "key"
- }
-
- puts "\tRecd$tnum.a:\
- Create environment, database, and parent transaction."
- set flags "-create -txn -home $testdir"
-
- set env_cmd "berkdb_env $flags"
- set env [eval $env_cmd]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- set oflags \
- "-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Create a database containing $nitems items, numbered with odds.
- # We'll then put the even numbers during the body of the test.
- set txn [$env txn]
- error_check_good init_txn [is_valid_txn $txn $env] TRUE
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- set key $keybase$i
- set data [chop_data $method $i$alphabet]
-
- # First, try to put the item in a child transaction,
-		# then abort and verify all the cursors we've created up
-		# until now.
- set ctxn [$env txn -parent $txn]
- error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE
- error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0
- error_check_good ctxn_abort($i) [$ctxn abort] 0
- for { set j 1 } { $j < $i } { incr j 2 } {
- error_check_good dbc_get($j):1 [$dbc($j) get -current] \
- [list [list $keybase$j \
- [pad_data $method $j$alphabet]]]
- }
-
- # Then put for real.
- error_check_good init_put($i) [$db put -txn $txn $key $data] 0
-
- # Set a cursor of the parent txn to each item.
- set dbc($i) [$db cursor -txn $txn]
- error_check_good dbc_getset($i) \
- [$dbc($i) get -set $key] \
- [list [list $keybase$i [pad_data $method $i$alphabet]]]
-
- # And verify all the cursors, including the one we just
- # created.
- for { set j 1 } { $j <= $i } { incr j 2 } {
- error_check_good dbc_get($j):2 [$dbc($j) get -current] \
- [list [list $keybase$j \
- [pad_data $method $j$alphabet]]]
- }
- }
-
- puts "\t\tRecd$tnum.a.1: Verify cursor stability after init."
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- error_check_good dbc_get($i):3 [$dbc($i) get -current] \
- [list [list $keybase$i [pad_data $method $i$alphabet]]]
- }
-
- puts "\tRecd$tnum.b: Put test."
- puts "\t\tRecd$tnum.b.1: Put items."
- set ctxn [$env txn -parent $txn]
- error_check_good txn [is_valid_txn $ctxn $env] TRUE
- for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
- set key $keybase$i
- set data [chop_data $method $i$alphabet]
- error_check_good child_put($i) [$db put -txn $ctxn $key $data] 0
-
- # If we're a renumbering recno, this is uninteresting.
- # Stir things up by putting a few additional records at
- # the beginning.
- if { [is_rrecno $method] == 1 } {
- set curs [$db cursor -txn $ctxn]
- error_check_bad llength_get_first \
- [llength [$curs get -first]] 0
- error_check_good cursor [is_valid_cursor $curs $db] TRUE
- # expect a recno!
- error_check_good rrecno_put($i) \
- [$curs put -before ADDITIONAL.$i] 1
- error_check_good curs_close [$curs close] 0
- }
- }
-
- puts "\t\tRecd$tnum.b.2: Verify cursor stability after abort."
- error_check_good ctxn_abort [$ctxn abort] 0
-
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- error_check_good dbc_get($i):4 [$dbc($i) get -current] \
- [list [list $keybase$i [pad_data $method $i$alphabet]]]
- }
-
- # Clean up cursors.
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- error_check_good dbc($i)_close [$dbc($i) close] 0
- }
-
- # Sync and verify.
- error_check_good txn_commit [$txn commit] 0
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
-
- error_check_good db_sync [$db sync] 0
- error_check_good db_verify \
- [verify_dir $testdir "\t\tRecd$tnum.b.3: "] 0
-
- # Now put back all the even records, this time in the parent.
- # Commit and re-begin the transaction so we can abort and
- # get back to a nice full database.
- for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
- set key $keybase$i
- set data [chop_data $method $i$alphabet]
- error_check_good child_put($i) [$db put -txn $txn $key $data] 0
- }
- error_check_good txn_commit [$txn commit] 0
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
-
-	# Delete test. Set a cursor to each record. Delete the even ones
-	# in a child transaction (first aborting, then committing) and
-	# check cursor stability. Then open another child transaction,
-	# delete the odd ones, and verify that the database is empty.
- puts "\tRecd$tnum.c: Delete test."
- unset dbc
-
- # Create cursors pointing at each item.
- for { set i 1 } { $i <= 2 * $nitems } { incr i } {
- set dbc($i) [$db cursor -txn $txn]
- error_check_good dbc($i)_create [is_valid_cursor $dbc($i) $db] \
- TRUE
- error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
- [list [list $keybase$i [pad_data $method $i$alphabet]]]
- }
-
- puts "\t\tRecd$tnum.c.1: Delete even items in child txn and abort."
-
- if { [is_rrecno $method] != 1 } {
- set init 2
- set bound [expr 2 * $nitems]
- set step 2
- } else {
- # In rrecno, deletes will renumber the items, so we have
- # to take that into account when we delete by recno.
- set init 2
- set bound [expr $nitems + 1]
- set step 1
- }
-
- set ctxn [$env txn -parent $txn]
- for { set i $init } { $i <= $bound } { incr i $step } {
- error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
- }
- error_check_good ctxn_abort [$ctxn abort] 0
-
- # Verify that no items are deleted.
- for { set i 1 } { $i <= 2 * $nitems } { incr i } {
- error_check_good dbc_get($i):5 [$dbc($i) get -current] \
- [list [list $keybase$i [pad_data $method $i$alphabet]]]
- }
-
- puts "\t\tRecd$tnum.c.2: Delete even items in child txn and commit."
- set ctxn [$env txn -parent $txn]
- for { set i $init } { $i <= $bound } { incr i $step } {
- error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
- }
- error_check_good ctxn_commit [$ctxn commit] 0
-
- # Verify that even items are deleted and odd items are not.
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- if { [is_rrecno $method] != 1 } {
- set j $i
- } else {
- set j [expr ($i - 1) / 2 + 1]
- }
- error_check_good dbc_get($i):6 [$dbc($i) get -current] \
- [list [list $keybase$j [pad_data $method $i$alphabet]]]
- }
- for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
- error_check_good dbc_get($i):7 [$dbc($i) get -current] ""
- }
-
- puts "\t\tRecd$tnum.c.3: Delete odd items in child txn."
-
- set ctxn [$env txn -parent $txn]
-
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- if { [is_rrecno $method] != 1 } {
- set j $i
- } else {
- # If this is an rrecno, just delete the first
- # item repeatedly--the renumbering will make
- # that delete everything.
- set j 1
- }
- error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
- }
-
- # Verify that everyone's deleted.
- for { set i 1 } { $i <= 2 * $nitems } { incr i } {
- error_check_good get_deleted($i) \
- [llength [$db get -txn $ctxn $keybase$i]] 0
- }
-
- puts "\t\tRecd$tnum.c.4: Verify cursor stability after abort."
- error_check_good ctxn_abort [$ctxn abort] 0
-
- # Verify that even items are deleted and odd items are not.
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- if { [is_rrecno $method] != 1 } {
- set j $i
- } else {
- set j [expr ($i - 1) / 2 + 1]
- }
- error_check_good dbc_get($i):8 [$dbc($i) get -current] \
- [list [list $keybase$j [pad_data $method $i$alphabet]]]
- }
- for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
- error_check_good dbc_get($i):9 [$dbc($i) get -current] ""
- }
-
- # Clean up cursors.
- for { set i 1 } { $i <= 2 * $nitems } { incr i } {
- error_check_good dbc($i)_close [$dbc($i) close] 0
- }
-
- # Sync and verify.
- error_check_good db_sync [$db sync] 0
- error_check_good db_verify \
- [verify_dir $testdir "\t\tRecd$tnum.c.5: "] 0
-
- puts "\tRecd$tnum.d: Clean up."
- error_check_good txn_commit [$txn commit] 0
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
- error_check_good verify_dir \
- [verify_dir $testdir "\t\tRecd$tnum.d.1: "] 0
-
- if { $log_log_record_types == 1 } {
- logtrack_read $testdir
- }
-}
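
Stripped of its looping and bookkeeping, the property recd013 checks fits in a
few lines. A minimal sketch, assuming the Berkeley DB Tcl bindings are loaded
and $testdir exists (cursors.db is a made-up filename):

    set env [berkdb_env -create -txn -home $testdir]
    set db [berkdb_open -auto_commit -create -btree -env $env cursors.db]

    set parent [$env txn]
    $db put -txn $parent key1 data1
    set dbc [$db cursor -txn $parent]
    $dbc get -set key1

    # An aborted child txn must not disturb the parent's cursors.
    set child [$env txn -parent $parent]
    $db put -txn $child key2 data2
    $child abort
    puts [$dbc get -current]    ;# => {key1 data1}

    $dbc close
    $parent commit
    $db close
    $env close
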
diff --git a/storage/bdb/test/recd014.tcl b/storage/bdb/test/recd014.tcl
deleted file mode 100644
index da9207cb008..00000000000
--- a/storage/bdb/test/recd014.tcl
+++ /dev/null
@@ -1,445 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd014.tcl,v 1.21 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST recd014
-# TEST This is a recovery test for create/delete of queue extents. We
-# TEST then need to recover and make sure the file correctly exists
-# TEST or not, as the case may be.
-proc recd014 { method args} {
- global fixed_len
- source ./include.tcl
-
-	if { [is_queueext $method] != 1 } {
- puts "Recd014: Skipping for method $method"
- return
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Recd014: skipping for specific pagesizes"
- return
- }
-
- set orig_fixed_len $fixed_len
- #
-	# We use 512-byte pages so that we can control
-	# when extents get created and removed.
- #
- set fixed_len 300
-
- set opts [convert_args $method $args]
- set omethod [convert_method $method]
- #
- # We want to set -extent 1 instead of what
- # convert_args gave us.
- #
- set exti [lsearch -exact $opts "-extent"]
- incr exti
- set opts [lreplace $opts $exti $exti 1]
-
- puts "Recd014: $method extent creation/deletion tests"
-
- # Create the database and environment.
- env_cleanup $testdir
-
- set testfile recd014.db
- set flags "-create -txn -home $testdir"
-
- puts "\tRecd014.a: creating environment"
- set env_cmd "berkdb_env $flags"
-
- puts "\tRecd014.b: Create test commit"
- ext_recover_create $testdir $env_cmd $omethod \
- $opts $testfile commit
- puts "\tRecd014.b: Create test abort"
- ext_recover_create $testdir $env_cmd $omethod \
- $opts $testfile abort
-
- puts "\tRecd014.c: Consume test commit"
- ext_recover_consume $testdir $env_cmd $omethod \
- $opts $testfile commit
- puts "\tRecd014.c: Consume test abort"
- ext_recover_consume $testdir $env_cmd $omethod \
- $opts $testfile abort
-
- set fixed_len $orig_fixed_len
- puts "\tRecd014.d: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-}
-
-proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
- global log_log_record_types
- global fixed_len
- global alphabet
- source ./include.tcl
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $dir
- }
-
- env_cleanup $dir
- # Open the environment and set the copy/abort locations
- set env [eval $env_cmd]
-
- set init_file $dir/$dbfile.init
- set noenvflags "-create $method -mode 0644 -pagesize 512 $opts $dbfile"
- set oflags "-env $env $noenvflags"
-
- set t [$env txn]
- error_check_good txn_begin [is_valid_txn $t $env] TRUE
-
- set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
- error_check_good txn_commit [$t commit] 0
-
- set t [$env txn]
- error_check_good txn_begin [is_valid_txn $t $env] TRUE
-
- #
- # The command to execute to create an extent is a put.
- # We are just creating the first one, so our extnum is 0.
- #
- set extnum 0
- set data [chop_data $method [replicate $alphabet 512]]
- puts "\t\tExecuting command"
- set putrecno [$db put -txn $t -append $data]
- error_check_good db_put $putrecno 1
-
- # Sync the db so any changes to the file that are
- # in mpool get written to the disk file before the
- # diff.
- puts "\t\tSyncing"
- error_check_good db_sync [$db sync] 0
-
- catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
- copy_extent_file $dir $dbfile afterop
-
- error_check_good txn_$txncmd:$t [$t $txncmd] 0
- #
- # If we don't abort, then we expect success.
- # If we abort, we expect no file created.
- #
- set dbq [make_ext_filename $dir $dbfile $extnum]
- error_check_good extput:exists1 [file exists $dbq] 1
- set ret [$db get $putrecno]
- if {$txncmd == "abort"} {
- #
- # Operation was aborted. Verify our entry is not there.
- #
- puts "\t\tCommand executed and aborted."
- error_check_good db_get [llength $ret] 0
- } else {
- #
- # Operation was committed, verify it exists.
- #
- puts "\t\tCommand executed and committed."
- error_check_good db_get [llength $ret] 1
- catch { file copy -force $dir/$dbfile $init_file } res
- copy_extent_file $dir $dbfile init
- }
- set t [$env txn]
- error_check_good txn_begin [is_valid_txn $t $env] TRUE
- error_check_good db_close [$db close] 0
- error_check_good txn_commit [$t commit] 0
- error_check_good env_close [$env close] 0
-
- #
- # Run recovery here. Should be a no-op. Verify that
-	# the file still does (or doesn't) exist when we are done.
- #
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery (no-op) ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
- #
- # Verify it did not change.
- #
- error_check_good extput:exists2 [file exists $dbq] 1
- ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
-
- #
- # Need a new copy to get the right LSN into the file.
- #
- catch { file copy -force $dir/$dbfile $init_file } res
- copy_extent_file $dir $dbfile init
-
- #
- # Undo.
- # Now move the .afterop file to $dbfile. Run recovery again.
- #
- file copy -force $dir/$dbfile.afterop $dir/$dbfile
- move_file_extent $dir $dbfile afterop copy
-
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery (afterop) ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
- ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
-
- #
- # To redo, remove the dbfiles. Run recovery again.
- #
- catch { file rename -force $dir/$dbfile $dir/$dbfile.renamed } res
- copy_extent_file $dir $dbfile renamed rename
-
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery (init) ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- #
- # !!!
- # Even though db_recover exits with status 0, it should print out
- # a warning because the file didn't exist. Db_recover writes this
- # to stderr. Tcl assumes that ANYTHING written to stderr is an
- # error, so even though we exit with 0 status, we still get an
- # error back from 'catch'. Look for the warning.
- #
- if { $stat == 1 && [is_substr $result "warning"] == 0 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
-
- #
- # Verify it was redone. However, since we removed the files
- # to begin with, recovery with abort will not recreate the
- # extent. Recovery with commit will.
- #
- if {$txncmd == "abort"} {
- error_check_good extput:exists3 [file exists $dbq] 0
- } else {
- error_check_good extput:exists3 [file exists $dbq] 1
- }
-}
-
-proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
- if { $txncmd == "commit" } {
- #
- # Operation was committed. Verify it did not change.
- #
- error_check_good \
- diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff "-dar" $init_file $dir $dbfile] 0
- } else {
- #
- # Operation aborted. The file is there, but make
- # sure the item is not.
- #
- set xdb [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $xdb] TRUE
- set ret [$xdb get $putrecno]
- error_check_good db_get [llength $ret] 0
- error_check_good db_close [$xdb close] 0
- }
-}
-
-proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
- global log_log_record_types
- global alphabet
- source ./include.tcl
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $dir
- }
-
- env_cleanup $dir
- # Open the environment and set the copy/abort locations
- set env [eval $env_cmd]
-
- set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
- -env $env $opts $dbfile"
-
- #
- # Open our db, add some data, close and copy as our
- # init file.
- #
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set extnum 0
- set data [chop_data $method [replicate $alphabet 512]]
-
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
- set putrecno [$db put -txn $txn -append $data]
- error_check_good db_put $putrecno 1
- error_check_good commit [$txn commit] 0
- error_check_good db_close [$db close] 0
-
- puts "\t\tExecuting command"
-
- set init_file $dir/$dbfile.init
- catch { file copy -force $dir/$dbfile $init_file } res
- copy_extent_file $dir $dbfile init
-
- #
- # If we don't abort, then we expect success.
- # If we abort, we expect no file removed until recovery is run.
- #
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set t [$env txn]
- error_check_good txn_begin [is_valid_txn $t $env] TRUE
-
- set dbcmd "$db get -txn $t -consume"
- set ret [eval $dbcmd]
- error_check_good db_sync [$db sync] 0
-
- catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
- copy_extent_file $dir $dbfile afterop
-
- error_check_good txn_$txncmd:$t [$t $txncmd] 0
- error_check_good db_sync [$db sync] 0
- set dbq [make_ext_filename $dir $dbfile $extnum]
- if {$txncmd == "abort"} {
- #
- # Operation was aborted, verify ext did not change.
- #
- puts "\t\tCommand executed and aborted."
-
- #
- # Check that the file exists. Final state.
- # Since we aborted the txn, we should be able
- # to get to our original entry.
- #
- error_check_good postconsume.1 [file exists $dbq] 1
- error_check_good \
- diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\
- [dbdump_diff "-dar" $init_file $dir $dbfile] 0
- } else {
- #
- # Operation was committed, verify it does
- # not exist.
- #
- puts "\t\tCommand executed and committed."
- #
-		# Check file existence. Consume operations remove
-		# the extent once we move off it, which we should
-		# have done by now.
- error_check_good consume_exists [file exists $dbq] 0
- }
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
-
- #
- # Run recovery here on what we ended up with. Should be a no-op.
- #
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery (no-op) ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
- if { $txncmd == "abort"} {
- #
- # Operation was aborted, verify it did not change.
- #
- error_check_good \
- diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff "-dar" $init_file $dir $dbfile] 0
- } else {
- #
- # Operation was committed, verify it does
- # not exist. Both operations should result
- # in no file existing now that we've run recovery.
- #
- error_check_good after_recover1 [file exists $dbq] 0
- }
-
- #
- # Run recovery here. Re-do the operation.
- # Verify that the file doesn't exist
- # (if we committed) or change (if we aborted)
- # when we are done.
- #
- catch { file copy -force $dir/$dbfile $init_file } res
- copy_extent_file $dir $dbfile init
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery (init) ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
- if { $txncmd == "abort"} {
- #
- # Operation was aborted, verify it did not change.
- #
- error_check_good \
- diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff "-dar" $init_file $dir $dbfile] 0
- } else {
- #
- # Operation was committed, verify it does
- # not exist. Both operations should result
- # in no file existing now that we've run recovery.
- #
- error_check_good after_recover2 [file exists $dbq] 0
- }
-
- #
- # Now move the .afterop file to $dbfile. Run recovery again.
- #
- set filecopy [glob $dir/*.afterop]
- set afterop [lindex $filecopy 0]
- file rename -force $afterop $dir/$dbfile
- set afterop [string range $afterop \
- [expr [string last "/" $afterop] + 1] \
- [string last "." $afterop]]
- move_file_extent $dir $dbfile afterop rename
-
- berkdb debug_check
- puts -nonewline "\t\tAbout to run recovery (afterop) ... "
- flush stdout
-
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- return
- }
- puts "complete"
-
- if { $txncmd == "abort"} {
- #
- # Operation was aborted, verify it did not change.
- #
- error_check_good \
- diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff "-dar" $init_file $dir $dbfile] 0
- } else {
- #
- # Operation was committed, verify it still does
- # not exist.
- #
- error_check_good after_recover3 [file exists $dbq] 0
- }
-}
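
Both ext_recover_create and ext_recover_consume repeat one idiom: run
catastrophic recovery with db_recover -c and fail unless it succeeds, while
tolerating the stderr-warning quirk noted above. A condensed sketch of that
idiom as a helper (run_catastrophic_recovery is a hypothetical name; $util_path
and is_substr come from the suite):

    proc run_catastrophic_recovery { dir } {
        global util_path
        berkdb debug_check
        set stat [catch {exec $util_path/db_recover -h $dir -c} result]
        # db_recover may exit 0 yet write a warning to stderr, which
        # Tcl's catch reports as an error; accept that case.
        if { $stat == 1 && [is_substr $result "warning"] == 0 } {
            error "FAIL: Recovery error: $result."
        }
        return $result
    }
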
diff --git a/storage/bdb/test/recd015.tcl b/storage/bdb/test/recd015.tcl
deleted file mode 100644
index afe0ac8833b..00000000000
--- a/storage/bdb/test/recd015.tcl
+++ /dev/null
@@ -1,162 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd015.tcl,v 1.16 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST recd015
-# TEST This is a recovery test for lots of prepared txns.
-# TEST It forces txn_recover to be called with the DB_FIRST
-# TEST flag and then with DB_NEXT.
-proc recd015 { method args } {
- source ./include.tcl
- global rand_init
- error_check_good set_random_seed [berkdb srand $rand_init] 0
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Recd015: $method ($args) prepared txns test"
-
- # Create the database and environment.
-
- set numtxns 1
- set testfile NULL
-
- set env_cmd "berkdb_env -create -txn -home $testdir"
- set msg "\tRecd015.a"
- puts "$msg Simple test to prepare $numtxns txn "
- foreach op { abort commit discard } {
- env_cleanup $testdir
- recd015_body $env_cmd $testfile $numtxns $msg $op
- }
-
- #
- # Now test large numbers of prepared txns to test DB_NEXT
- # on txn_recover.
- #
- set numtxns 250
- set testfile recd015.db
- set txnmax [expr $numtxns + 5]
- #
- # For this test we create our database ahead of time so that we
- # don't need to send methods and args to the script.
- #
- env_cleanup $testdir
- set env_cmd "berkdb_env -create -txn_max $txnmax -txn -home $testdir"
- set env [eval $env_cmd]
- error_check_good dbenv [is_valid_env $env] TRUE
- set db [eval {berkdb_open -create} $omethod -env $env $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
- error_check_good envclose [$env close] 0
-
- set msg "\tRecd015.b"
- puts "$msg Large test to prepare $numtxns txn "
- foreach op { abort commit discard } {
- recd015_body $env_cmd $testfile $numtxns $msg $op
- }
-
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $testdir/LOG } ret]
- error_check_good db_printlog $stat 0
- fileremove $testdir/LOG
-}
-
-proc recd015_body { env_cmd testfile numtxns msg op } {
- source ./include.tcl
-
- sentinel_init
- set gidf $testdir/gidfile
- fileremove -f $gidf
- set pidlist {}
- puts "$msg.0: Executing child script to prepare txns"
- berkdb debug_check
- set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \
- $testdir/recdout $env_cmd $testfile $gidf $numtxns &]
-
- lappend pidlist $p
- watch_procs $pidlist 5
- set f1 [open $testdir/recdout r]
- set r [read $f1]
- puts $r
- close $f1
- fileremove -f $testdir/recdout
-
- berkdb debug_check
- puts -nonewline "$msg.1: Running recovery ... "
- flush stdout
- berkdb debug_check
- set env [eval $env_cmd -recover]
- error_check_good dbenv-recover [is_valid_env $env] TRUE
- puts "complete"
-
- puts "$msg.2: getting txns from txn_recover"
- set txnlist [$env txn_recover]
- error_check_good txnlist_len [llength $txnlist] $numtxns
-
- set gfd [open $gidf r]
- set i 0
- while { [gets $gfd gid] != -1 } {
- set gids($i) $gid
- incr i
- }
- close $gfd
- #
- # Make sure we have as many as we expect
- error_check_good num_gids $i $numtxns
-
- set i 0
- puts "$msg.3: comparing GIDs and $op txns"
- foreach tpair $txnlist {
- set txn [lindex $tpair 0]
- set gid [lindex $tpair 1]
- error_check_good gidcompare $gid $gids($i)
- error_check_good txn:$op [$txn $op] 0
- incr i
- }
- if { $op != "discard" } {
- error_check_good envclose [$env close] 0
- return
- }
- #
- # If we discarded, now do it again and randomly resolve some
- # until all txns are resolved.
- #
- puts "$msg.4: resolving/discarding txns"
- set txnlist [$env txn_recover]
- set len [llength $txnlist]
- set opval(1) "abort"
- set opcnt(1) 0
- set opval(2) "commit"
- set opcnt(2) 0
- set opval(3) "discard"
- set opcnt(3) 0
- while { $len != 0 } {
- set opicnt(1) 0
- set opicnt(2) 0
- set opicnt(3) 0
- #
- # Abort/commit or discard them randomly until
- # all are resolved.
- #
- for { set i 0 } { $i < $len } { incr i } {
- set t [lindex $txnlist $i]
- set txn [lindex $t 0]
- set newop [berkdb random_int 1 3]
- set ret [$txn $opval($newop)]
- error_check_good txn_$opval($newop):$i $ret 0
- incr opcnt($newop)
- incr opicnt($newop)
- }
-# puts "$opval(1): $opicnt(1) Total: $opcnt(1)"
-# puts "$opval(2): $opicnt(2) Total: $opcnt(2)"
-# puts "$opval(3): $opicnt(3) Total: $opcnt(3)"
-
- set txnlist [$env txn_recover]
- set len [llength $txnlist]
- }
-
- error_check_good envclose [$env close] 0
-}
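
The resolution loop at the end of recd015_body reduces to the following
pattern. A minimal sketch, assuming $env was just opened with -recover on an
environment holding prepared-but-unresolved transactions:

    set txnlist [$env txn_recover]
    while { [llength $txnlist] != 0 } {
        foreach tpair $txnlist {
            set txn [lindex $tpair 0]
            # Committed or aborted txns are resolved for good;
            # discarded ones reappear on the next txn_recover pass.
            switch [berkdb random_int 1 3] {
                1 { $txn abort }
                2 { $txn commit }
                3 { $txn discard }
            }
        }
        set txnlist [$env txn_recover]
    }
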
diff --git a/storage/bdb/test/recd016.tcl b/storage/bdb/test/recd016.tcl
deleted file mode 100644
index 6f0d3d132f2..00000000000
--- a/storage/bdb/test/recd016.tcl
+++ /dev/null
@@ -1,179 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd016.tcl,v 11.13 2004/07/07 19:08:21 carol Exp $
-#
-# TEST recd016
-# TEST Test recovery after checksum error.
-proc recd016 { method args} {
- global fixed_len
- global log_log_record_types
- global datastr
- source ./include.tcl
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Recd016: skipping for specific pagesizes"
- return
- }
- if { [is_queueext $method] == 1 } {
- puts "Recd016: skipping for method $method"
- return
- }
-
- puts "Recd016: $method recovery after checksum error"
-
- # Create the database and environment.
- env_cleanup $testdir
-
- set testfile recd016.db
- set flags "-create -txn -home $testdir"
-
- puts "\tRecd016.a: creating environment"
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- set pgsize 512
- set orig_fixed_len $fixed_len
- set fixed_len [expr $pgsize / 4]
- set opts [convert_args $method $args]
- set omethod [convert_method $method]
- set oflags "-create $omethod -mode 0644 \
- -auto_commit -chksum -pagesize $pgsize $opts $testfile"
- set db [eval {berkdb_open} -env $dbenv $oflags]
-
- #
- # Put some data.
- #
- set nument 50
- puts "\tRecd016.b: Put some data"
- for { set i 1 } { $i <= $nument } { incr i } {
- # Use 'i' as key so method doesn't matter
- set key $i
- set data $i$datastr
-
- # Put, in a txn.
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
- error_check_good db_put \
- [$db put -txn $txn $key [chop_data $method $data]] 0
- error_check_good txn_commit [$txn commit] 0
- }
- error_check_good db_close [$db close] 0
- error_check_good env_close [$dbenv close] 0
- #
- # We need to remove the env so that we don't get cached
- # pages.
- #
- error_check_good env_remove [berkdb envremove -home $testdir] 0
-
- puts "\tRecd016.c: Overwrite part of database"
- #
- # First just touch some bits in the file. We want to go
- # through the paging system, so touch some data pages,
- # like the middle of page 2.
- # We should get a checksum error for the checksummed file.
- #
- set pg 2
- set fid [open $testdir/$testfile r+]
- fconfigure $fid -translation binary
- set seeklen [expr $pgsize * $pg + 200]
- seek $fid $seeklen start
- set byte [read $fid 1]
- binary scan $byte c val
- set newval [expr ~$val]
- set newbyte [binary format c $newval]
- seek $fid $seeklen start
- puts -nonewline $fid $newbyte
- close $fid
-
- #
- # Verify we get the checksum error. When we get it, it should
- # log the error as well, so when we run recovery we'll need to
- # do catastrophic recovery. We do this in a sub-process so that
- # the files are closed after the panic.
- #
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
-
- set env_cmd "berkdb_env_noerr $flags"
- set dbenv [send_cmd $f1 $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"]
- error_check_good db [is_valid_db $db] TRUE
-
-	# We need to set non-blocking mode so that after each command
-	# we can read all of its remaining output and tell which
-	# output belongs to which command.
- fconfigure $f1 -blocking 0
- set ret [read $f1]
- set got_err 0
- for { set i 1 } { $i <= $nument } { incr i } {
- set stat [send_cmd $f1 "catch {$db get $i} r"]
- set getret [send_cmd $f1 "puts \$r"]
- set ret [read $f1]
- if { $stat == 1 } {
- error_check_good dbget:fail [is_substr $getret \
- "checksum error: page $pg"] 1
- set got_err 1
- break
- } else {
- set key [lindex [lindex $getret 0] 0]
- set data [lindex [lindex $getret 0] 1]
- error_check_good keychk $key $i
- error_check_good datachk $data \
- [pad_data $method $i$datastr]
- }
- }
- error_check_good got_chksum $got_err 1
- set ret [send_cmd $f1 "$db close"]
- set extra [read $f1]
- error_check_good db:fail [is_substr $ret "run recovery"] 1
-
- set ret [send_cmd $f1 "$dbenv close"]
- error_check_good env_close:fail [is_substr $ret "run recovery"] 1
- close $f1
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $testdir
- }
-
- puts "\tRecd016.d: Run normal recovery"
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 1
- error_check_good dbrec:fail \
- [is_substr $r "checksum error"] 1
-
- catch {fileremove $testdir/$testfile} ret
- puts "\tRecd016.e: Run catastrophic recovery"
- set ret [catch {exec $util_path/db_recover -c -h $testdir} r]
- error_check_good db_recover $ret 0
-
- #
- # Now verify the data was reconstructed correctly.
- #
- set env_cmd "berkdb_env_noerr $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- set db [eval {berkdb_open} -env $dbenv $oflags]
- error_check_good db [is_valid_db $db] TRUE
-
- for { set i 1 } { $i <= $nument } { incr i } {
- set stat [catch {$db get $i} ret]
- error_check_good stat $stat 0
- set key [lindex [lindex $ret 0] 0]
- set data [lindex [lindex $ret 0] 1]
- error_check_good keychk $key $i
- error_check_good datachk $data [pad_data $method $i$datastr]
- }
- error_check_good db_close [$db close] 0
- error_check_good env_close [$dbenv close] 0
- set fixed_len $orig_fixed_len
- return
-}
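
The corruption step in Recd016.c, factored into a stand-alone helper for
clarity (flip_byte is a hypothetical name, not part of the suite):

    proc flip_byte { path offset } {
        set fid [open $path r+]
        fconfigure $fid -translation binary
        seek $fid $offset start
        binary scan [read $fid 1] c val
        seek $fid $offset start
        # The bitwise complement guarantees the byte changes, so the
        # page checksum can no longer match.
        puts -nonewline $fid [binary format c [expr {~$val}]]
        close $fid
    }

    # Recd016 targets the middle of page 2 of a 512-byte-page file:
    # flip_byte recd016.db [expr {512 * 2 + 200}]
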
diff --git a/storage/bdb/test/recd017.tcl b/storage/bdb/test/recd017.tcl
deleted file mode 100644
index 43dfb642194..00000000000
--- a/storage/bdb/test/recd017.tcl
+++ /dev/null
@@ -1,158 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd017.tcl,v 11.7 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST recd017
-# TEST Test recovery and security. This is basically a watered
-# TEST down version of recd001 just to verify that encrypted environments
-# TEST can be recovered.
-proc recd017 { method {select 0} args} {
- global fixed_len
- global encrypt
- global passwd
- global has_crypto
- source ./include.tcl
-
- # Skip test if release does not support encryption.
- if { $has_crypto == 0 } {
- puts "Skipping recd017 for non-crypto release."
- return
- }
-
- set orig_fixed_len $fixed_len
- set opts [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Recd017: $method operation/transaction tests"
-
- # Create the database and environment.
- env_cleanup $testdir
-
- # The recovery tests were originally written to
- # do a command, abort, do it again, commit, and then
- # repeat the sequence with another command. Each command
- # tends to require that the previous command succeeded and
- # left the database a certain way. To avoid cluttering up the
- # op_recover interface as well as the test code, we create two
- # databases; one does abort and then commit for each op, the
- # other does prepare, prepare-abort, and prepare-commit for each
-	# op. If all goes well, this allows each command to depend on
-	# exactly one successful iteration of the previous command.
- set testfile recd017.db
- set testfile2 recd017-2.db
-
- set flags "-create -encryptaes $passwd -txn -home $testdir"
-
- puts "\tRecd017.a.0: creating environment"
- set env_cmd "berkdb_env $flags"
- convert_encrypt $env_cmd
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- #
- # We need to create a database to get the pagesize (either
- # the default or whatever might have been specified).
- # Then remove it so we can compute fixed_len and create the
- # real database.
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv -encrypt $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set stat [$db stat]
- #
- # Compute the fixed_len based on the pagesize being used.
- # We want the fixed_len to be 1/4 the pagesize.
- #
- set pg [get_pagesize $stat]
- error_check_bad get_pagesize $pg -1
- set fixed_len [expr $pg / 4]
- error_check_good db_close [$db close] 0
- error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
-
- # Convert the args again because fixed_len is now real.
- # Create the databases and close the environment.
-	# (We cannot specify db truncate in a txn-protected env!)
- set opts [convert_args $method ""]
- convert_encrypt $env_cmd
- set omethod [convert_method $method]
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv -encrypt $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv -encrypt $opts $testfile2"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- error_check_good env_close [$dbenv close] 0
-
- puts "\tRecd017.a.1: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
-
- # List of recovery tests: {CMD MSG} pairs.
- set rlist {
- { {DB put -txn TXNID $key $data} "Recd017.b: put"}
- { {DB del -txn TXNID $key} "Recd017.c: delete"}
- }
-
- # These are all the data values that we're going to need to read
- # through the operation table and run the recovery tests.
-
- if { [is_record_based $method] == 1 } {
- set key 1
- } else {
- set key recd017_key
- }
- set data recd017_data
- foreach pair $rlist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
- }
-
- if { [is_queue $method] != 1 } {
- if { [string first append $cmd] != -1 } {
- continue
- }
- if { [string first consume $cmd] != -1 } {
- continue
- }
- }
-
-# if { [is_fixed_length $method] == 1 } {
-# if { [string first partial $cmd] != -1 } {
-# continue
-# }
-# }
- op_recover abort $testdir $env_cmd $testfile $cmd $msg
- op_recover commit $testdir $env_cmd $testfile $cmd $msg
- #
- # Note that since prepare-discard ultimately aborts
- # the txn, it must come before prepare-commit.
- #
- op_recover prepare-abort $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-discard $testdir $env_cmd $testfile2 \
- $cmd $msg
- op_recover prepare-commit $testdir $env_cmd $testfile2 \
- $cmd $msg
- }
- set fixed_len $orig_fixed_len
- return
-}
diff --git a/storage/bdb/test/recd018.tcl b/storage/bdb/test/recd018.tcl
deleted file mode 100644
index 2f2300cf97b..00000000000
--- a/storage/bdb/test/recd018.tcl
+++ /dev/null
@@ -1,110 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd018.tcl,v 11.5 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST recd018
-# TEST Test recovery of closely interspersed checkpoints and commits.
-#
-# This test is from the error case from #4230.
-#
-proc recd018 { method {ndbs 10} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- set tnum "018"
-
- puts "Recd$tnum ($args): $method recovery of checkpoints and commits."
-
- set tname recd$tnum.db
- env_cleanup $testdir
-
- set i 0
- if { [is_record_based $method] == 1 } {
- set key 1
- set key2 2
- } else {
- set key KEY
- set key2 KEY2
- }
-
- puts "\tRecd$tnum.a: Create environment and database."
- set flags "-create -txn -home $testdir"
-
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
- for { set i 0 } { $i < $ndbs } { incr i } {
- set testfile $tname.$i
- set db($i) [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db($i)] TRUE
- set file $testdir/$testfile.init
- catch { file copy -force $testdir/$testfile $file} res
- copy_extent_file $testdir $testfile init
- }
-
- # Main loop: Write a record or two to each database.
- # Do a commit immediately followed by a checkpoint after each one.
- error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
-
- puts "\tRecd$tnum.b Put/Commit/Checkpoint to $ndbs databases"
- for { set i 0 } { $i < $ndbs } { incr i } {
- set testfile $tname.$i
- set data $i
-
- # Put, in a txn.
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
- error_check_good db_put \
- [$db($i) put -txn $txn $key [chop_data $method $data]] 0
- error_check_good txn_commit [$txn commit] 0
- error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
- if { [expr $i % 2] == 0 } {
- set txn [$dbenv txn]
- error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
- error_check_good db_put [$db($i) put \
- -txn $txn $key2 [chop_data $method $data]] 0
- error_check_good txn_commit [$txn commit] 0
- error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
- }
- error_check_good db_close [$db($i) close] 0
- set file $testdir/$testfile.afterop
- catch { file copy -force $testdir/$testfile $file} res
- copy_extent_file $testdir $testfile afterop
- }
- error_check_good env_close [$dbenv close] 0
-
-	# Now, loop through the saved file states and run recovery from
-	# each one, verifying that it succeeds.
- puts "\tRecd$tnum.c: Run recovery (no-op)"
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
-
- puts "\tRecd$tnum.d: Run recovery (initial file)"
- for { set i 0 } {$i < $ndbs } { incr i } {
- set testfile $tname.$i
- set file $testdir/$testfile.init
- catch { file copy -force $file $testdir/$testfile } res
- move_file_extent $testdir $testfile init copy
- }
-
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
-
- puts "\tRecd$tnum.e: Run recovery (after file)"
- for { set i 0 } {$i < $ndbs } { incr i } {
- set testfile $tname.$i
- set file $testdir/$testfile.afterop
- catch { file copy -force $file $testdir/$testfile } res
- move_file_extent $testdir $testfile afterop copy
- }
-
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
-
-}
diff --git a/storage/bdb/test/recd019.tcl b/storage/bdb/test/recd019.tcl
deleted file mode 100644
index 4e4e6051739..00000000000
--- a/storage/bdb/test/recd019.tcl
+++ /dev/null
@@ -1,123 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd019.tcl,v 11.6 2004/07/07 19:08:21 carol Exp $
-#
-# TEST recd019
-# TEST Test txn id wrap-around and recovery.
-proc recd019 { method {numid 50} args} {
- global fixed_len
- global txn_curid
- global log_log_record_types
- source ./include.tcl
-
- set orig_fixed_len $fixed_len
- set opts [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Recd019: $method txn id wrap-around test"
-
- # Create the database and environment.
- env_cleanup $testdir
-
- set testfile recd019.db
-
- set flags "-create -txn -home $testdir"
-
- puts "\tRecd019.a: creating environment"
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- # Test txn wrapping. Force a txn_recycle msg.
- #
- set new_curid $txn_curid
- set new_maxid [expr $new_curid + $numid]
- error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0
-
- #
- # We need to create a database to get the pagesize (either
- # the default or whatever might have been specified).
- # Then remove it so we can compute fixed_len and create the
- # real database.
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set stat [$db stat]
- #
- # Compute the fixed_len based on the pagesize being used.
- # We want the fixed_len to be 1/4 the pagesize.
- #
- set pg [get_pagesize $stat]
- error_check_bad get_pagesize $pg -1
- set fixed_len [expr $pg / 4]
- error_check_good db_close [$db close] 0
- error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
-
- # Convert the args again because fixed_len is now real.
- # Create the databases and close the environment.
-	# (We cannot specify db truncate in a txn-protected env!)
- set opts [convert_args $method ""]
- set omethod [convert_method $method]
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv -auto_commit $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
-
- #
- # Force txn ids to wrap twice and then some.
- #
- set nument [expr $numid * 3 - 2]
- puts "\tRecd019.b: Wrapping txn ids after $numid"
- set file $testdir/$testfile.init
- catch { file copy -force $testdir/$testfile $file} res
- copy_extent_file $testdir $testfile init
- for { set i 1 } { $i <= $nument } { incr i } {
- # Use 'i' as key so method doesn't matter
- set key $i
- set data $i
-
- # Put, in a txn.
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
- error_check_good db_put \
- [$db put -txn $txn $key [chop_data $method $data]] 0
- error_check_good txn_commit [$txn commit] 0
- }
- error_check_good db_close [$db close] 0
- set file $testdir/$testfile.afterop
- catch { file copy -force $testdir/$testfile $file} res
- copy_extent_file $testdir $testfile afterop
- error_check_good env_close [$dbenv close] 0
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $testdir
- }
-
- # Now, loop through and recover.
- puts "\tRecd019.c: Run recovery (no-op)"
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
-
- puts "\tRecd019.d: Run recovery (initial file)"
- set file $testdir/$testfile.init
- catch { file copy -force $file $testdir/$testfile } res
- move_file_extent $testdir $testfile init copy
-
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
-
- puts "\tRecd019.e: Run recovery (after file)"
- set file $testdir/$testfile.afterop
- catch { file copy -force $file $testdir/$testfile } res
- move_file_extent $testdir $testfile afterop copy
-
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
- set fixed_len $orig_fixed_len
- return
-}
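
In outline, recd019's wrap-around setup amounts to confining the txn ID space
and then burning through it. A hedged sketch, assuming the test-only txn_id_set
call shown above and the suite's $testdir and $txn_curid globals:

    set dbenv [berkdb_env -create -txn -home $testdir]
    # Confine new txn IDs to a window of 50, then start enough txns
    # to wrap the window twice and then some, forcing txn_recycle
    # records into the log.
    $dbenv txn_id_set $txn_curid [expr {$txn_curid + 50}]
    for { set i 1 } { $i <= [expr {50 * 3 - 2}] } { incr i } {
        set txn [$dbenv txn]
        $txn commit
    }
    $dbenv close
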
diff --git a/storage/bdb/test/recd020.tcl b/storage/bdb/test/recd020.tcl
deleted file mode 100644
index 6fbe4b78e23..00000000000
--- a/storage/bdb/test/recd020.tcl
+++ /dev/null
@@ -1,80 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd020.tcl,v 11.3 2004/09/22 18:01:05 bostic Exp $
-#
-# TEST recd020
-# TEST Test creation of intermediate directories -- an
-# TEST undocumented, UNIX-only feature.
-#
-proc recd020 { method args } {
- source ./include.tcl
- global tcl_platform
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- set tnum "020"
- set nentries 10
-
- if { $tcl_platform(platform) != "unix" } {
- puts "Skipping recd$tnum for non-UNIX platform."
- return
- }
-
- puts "Recd$tnum ($method):\
- Test creation of intermediate directories in recovery."
-
- # Create the original intermediate directory.
- env_cleanup $testdir
- set intdir INTDIR
- file mkdir $testdir/$intdir
-
- set testfile recd$tnum.db
- set flags "-create -txn -home $testdir"
-
- puts "\tRecd$tnum.a: Create environment and populate database."
- set env_cmd "berkdb_env $flags"
- set env [eval $env_cmd]
- error_check_good env [is_valid_env $env] TRUE
-
- set db [eval berkdb_open \
- -create $omethod $args -env $env -auto_commit $intdir/$testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set txn [$env txn]
- set data "data"
- for { set i 1 } { $i <= $nentries } { incr i } {
- error_check_good db_put [eval \
- {$db put} -txn $txn $i [chop_data $method $data.$i]] 0
- }
- error_check_good txn_commit [$txn commit] 0
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
-
- puts "\tRecd$tnum.b: Remove intermediate directory."
- error_check_good directory_there [file exists $testdir/$intdir] 1
- file delete -force $testdir/$intdir
- error_check_good directory_gone [file exists $testdir/$intdir] 0
-
- puts "\tRecd020.c: Run recovery, recreating intermediate directory."
- set env [eval $env_cmd -set_intermediate_dir 0751 -recover]
- error_check_good env [is_valid_env $env] TRUE
-
- puts "\tRecd020.d: Reopen test file to verify success."
- set db [berkdb_open -env $env $intdir/$testfile]
- error_check_good db_open [is_valid_db $db] TRUE
- for { set i 1 } { $i <= $nentries } { incr i } {
- set ret [$db get $i]
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_good key $k $i
- error_check_good data $d [pad_data $method $data.$i]
- }
-
- # Clean up.
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
-
-}
diff --git a/storage/bdb/test/recd15scr.tcl b/storage/bdb/test/recd15scr.tcl
deleted file mode 100644
index ef6fe7d0332..00000000000
--- a/storage/bdb/test/recd15scr.tcl
+++ /dev/null
@@ -1,74 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recd15scr.tcl,v 1.7 2004/01/28 03:36:29 bostic Exp $
-#
-# Recd15 - lots of txns - txn prepare script
-# Usage: recd15script envcmd dbfile gidfile numtxns
-# envcmd: command to open env
-# dbfile: name of database file
-# gidfile: name of global id file
-# numtxns: number of txns to start
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-set usage "recd15script envcmd dbfile gidfile numtxns"
-
-# Verify usage
-if { $argc != 4 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set envcmd [ lindex $argv 0 ]
-set dbfile [ lindex $argv 1 ]
-set gidfile [ lindex $argv 2 ]
-set numtxns [ lindex $argv 3 ]
-
-set txnmax [expr $numtxns + 5]
-set dbenv [eval $envcmd]
-error_check_good envopen [is_valid_env $dbenv] TRUE
-
-set usedb 0
-if { $dbfile != "NULL" } {
- set usedb 1
- set db [berkdb_open -auto_commit -env $dbenv $dbfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-}
-
-puts "\tRecd015script.a: Begin $numtxns txns"
-for {set i 0} {$i < $numtxns} {incr i} {
- set t [$dbenv txn]
- error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
- set txns($i) $t
- if { $usedb } {
- set dbc [$db cursor -txn $t]
- error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
- set curs($i) $dbc
- }
-}
-
-puts "\tRecd015script.b: Prepare $numtxns txns"
-set gfd [open $gidfile w+]
-for {set i 0} {$i < $numtxns} {incr i} {
- if { $usedb } {
- set dbc $curs($i)
- error_check_good dbc_close [$dbc close] 0
- }
- set t $txns($i)
- set gid [make_gid recd015script:$t]
- puts $gfd $gid
- error_check_good txn_prepare:$t [$t prepare $gid] 0
-}
-close $gfd
-
-#
-# We do not close the db or env, but exit with the txns outstanding.
-#
-puts "\tRecd015script completed successfully"
-flush stdout
diff --git a/storage/bdb/test/recdscript.tcl b/storage/bdb/test/recdscript.tcl
deleted file mode 100644
index 559d3407a09..00000000000
--- a/storage/bdb/test/recdscript.tcl
+++ /dev/null
@@ -1,37 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: recdscript.tcl,v 11.6 2004/01/28 03:36:29 bostic Exp $
-#
-# Recovery txn prepare script
-# Usage: recdscript op dir envcmd dbfile gidfile cmd
-# op: primary txn operation
-# dir: test directory
-# envcmd: command to open env
-# dbfile: name of database file
-# gidfile: name of global id file
-# cmd: db command to execute
-
-source ./include.tcl
-source $test_path/test.tcl
-
-set usage "recdscript op dir envcmd dbfile gidfile cmd"
-
-# Verify usage
-if { $argc != 6 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set op [ lindex $argv 0 ]
-set dir [ lindex $argv 1 ]
-set envcmd [ lindex $argv 2 ]
-set dbfile [ lindex $argv 3 ]
-set gidfile [ lindex $argv 4 ]
-set cmd [ lindex $argv 5 ]
-
-op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd
-flush stdout
diff --git a/storage/bdb/test/rep001.tcl b/storage/bdb/test/rep001.tcl
deleted file mode 100644
index 94163986a04..00000000000
--- a/storage/bdb/test/rep001.tcl
+++ /dev/null
@@ -1,198 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rep001.tcl,v 1.35 2004/09/22 18:01:05 bostic Exp $
-#
-# TEST rep001
-# TEST Replication rename and forced-upgrade test.
-# TEST
-# TEST Run rep_test in a replicated master environment.
-# TEST Verify that the database on the client is correct.
-# TEST Next, remove the database, close the master, upgrade the
-# TEST client, reopen the master, and make sure the new master can
-# TEST correctly run rep_test and propagate it in the other direction.
-
-proc rep001 { method { niter 1000 } { tnum "001" } args } {
- global passwd
- global has_crypto
-
- # Run tests with and without recovery. If we're doing testing
- # of in-memory logging, skip the combination of recovery
- # and in-memory logging -- it doesn't make sense.
- set logsets [create_logsets 2]
- set saved_args $args
-
- foreach recopt { "" "-recover" } {
- foreach l $logsets {
- set logindex [lsearch -exact $l "in-memory"]
- if { $recopt == "-recover" && $logindex != -1 } {
- puts "Skipping test with -recover for in-memory logs."
- continue
- }
- set envargs ""
- set args $saved_args
- puts "Rep$tnum: Replication sanity test ($method $recopt)."
- puts "Rep$tnum: Master logs are [lindex $l 0]"
- puts "Rep$tnum: Client logs are [lindex $l 1]"
- rep001_sub $method \
- $niter $tnum $envargs $l $recopt $args
-
- # Skip encrypted tests if not supported.
- if { $has_crypto == 0 } {
- continue
- }
-
- # Run the same tests with security.
- append envargs " -encryptaes $passwd "
- append args " -encrypt "
- puts "Rep$tnum: Replication and security sanity test\
- ($method $recopt)."
- puts "Rep$tnum: Master logs are [lindex $l 0]"
- puts "Rep$tnum: Client logs are [lindex $l 1]"
- rep001_sub $method \
- $niter $tnum $envargs $l $recopt $args
- }
- }
-}
-
-proc rep001_sub { method niter tnum envargs logset recargs largs } {
- source ./include.tcl
- global testdir
- global encrypt
-
- env_cleanup $testdir
-
- replsetup $testdir/MSGQUEUEDIR
-
- set masterdir $testdir/MASTERDIR
- set clientdir $testdir/CLIENTDIR
-
- file mkdir $masterdir
- file mkdir $clientdir
-
- set m_logtype [lindex $logset 0]
- set c_logtype [lindex $logset 1]
-
- # In-memory logs require a large log buffer, and cannot
- # be used with -txn nosync. Adjust the args for master
- # and client.
- set m_logargs [adjust_logargs $m_logtype]
- set c_logargs [adjust_logargs $c_logtype]
- set m_txnargs [adjust_txnargs $m_logtype]
- set c_txnargs [adjust_txnargs $c_logtype]
-
- # Open a master.
- repladd 1
- set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \
- -log_max 1000000 $envargs $m_logargs $recargs \
- -home $masterdir -errpfx MASTER $m_txnargs -rep_master \
- -rep_transport \[list 1 replsend\]"
-# set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \
-# -log_max 1000000 $envargs $m_logargs $recargs \
-# -home $masterdir \
-# -verbose {rep on} -errfile /dev/stderr \
-# -errpfx MASTER $m_txnargs -rep_master \
-# -rep_transport \[list 1 replsend\]"
- set masterenv [eval $env_cmd(M)]
- error_check_good master_env [is_valid_env $masterenv] TRUE
-
- # Open a client
- repladd 2
- set env_cmd(C) "berkdb_env_noerr -create -lock_max 2500 \
- -log_max 1000000 $envargs $c_logargs $recargs \
- -home $clientdir -errpfx CLIENT $c_txnargs -rep_client \
- -rep_transport \[list 2 replsend\]"
-# set env_cmd(C) "berkdb_env_noerr -create -lock_max 2500 \
-# -log_max 1000000 $envargs $c_logargs $recargs \
-# -home $clientdir \
-# -verbose {rep on} -errfile /dev/stderr \
-# -errpfx CLIENT $c_txnargs -rep_client \
-# -rep_transport \[list 2 replsend\]"
- set clientenv [eval $env_cmd(C)]
- error_check_good client_env [is_valid_env $clientenv] TRUE
-
- # Bring the client online by processing the startup messages.
- set envlist "{$masterenv 1} {$clientenv 2}"
- process_msgs $envlist
-
- # Run rep_test in the master (and update client).
- puts "\tRep$tnum.a:\
- Running rep_test in replicated env ($envargs $recargs)."
- eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
- process_msgs $envlist
-
- puts "\tRep$tnum.b: Verifying client database contents."
- set dbname "test.db"
- set masterdb [berkdb_open -env $masterenv -auto_commit $dbname]
- set clientdb [berkdb_open -env $clientenv -auto_commit $dbname]
-
- error_check_good compare_master_and_client [db_compare \
- $masterdb $clientdb $masterdir/$dbname $clientdir/$dbname] 0
-
- error_check_good master_close [$masterdb close] 0
- error_check_good client_close [$clientdb close] 0
-
- # Remove the file (and update client).
- puts "\tRep$tnum.c: Remove the file on the master and close master."
- error_check_good remove \
- [$masterenv dbremove -auto_commit $dbname] 0
- error_check_good masterenv_close [$masterenv close] 0
- process_msgs $envlist
-
- puts "\tRep$tnum.d: Upgrade client."
- set newmasterenv $clientenv
- error_check_good upgrade_client [$newmasterenv rep_start -master] 0
-
- # Run rep_test in the new master
- puts "\tRep$tnum.e: Running rep_test in new master."
- eval rep_test $method $newmasterenv NULL $niter 0 0 0 $largs
- set envlist "{$newmasterenv 2}"
- process_msgs $envlist
-
- puts "\tRep$tnum.f: Reopen old master as client and catch up."
- # Throttle master so it can't send everything at once
- $newmasterenv rep_limit 0 [expr 64 * 1024]
- set newclientenv [eval {berkdb_env -create -recover} $envargs \
- -txn nosync -lock_max 2500 \
- {-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
- error_check_good newclient_env [is_valid_env $newclientenv] TRUE
- set envlist "{$newclientenv 1} {$newmasterenv 2}"
- process_msgs $envlist
-
- # If we're running with a low number of iterations, we might
- # not have had to throttle the data transmission; skip the check.
- if { $niter > 200 } {
- set nthrottles \
- [stat_field $newmasterenv rep_stat "Transmission limited"]
- error_check_bad nthrottles $nthrottles -1
- error_check_bad nthrottles $nthrottles 0
- }
-
- # Run a modified rep_test in the new master (and update client).
- puts "\tRep$tnum.g: Running rep_test in new master."
- eval rep_test $method \
- $newmasterenv NULL $niter $niter $niter 0 $largs
- process_msgs $envlist
-
- # Verify the database in the client dir.
- puts "\tRep$tnum.h: Verifying new client database contents."
- set masterdb [berkdb_open -env $newmasterenv -auto_commit $dbname]
- set clientdb [berkdb_open -env $newclientenv -auto_commit $dbname]
-
- error_check_good compare_master_and_client [db_compare \
- $masterdb $clientdb $masterdir/$dbname $clientdir/$dbname] 0
-
- error_check_good master_close [$masterdb close] 0
- error_check_good client_close [$clientdb close] 0
- error_check_good newmasterenv_close [$newmasterenv close] 0
- error_check_good newclientenv_close [$newclientenv close] 0
-
- if { [lsearch $envargs "-encrypta*"] != -1 } {
- set encrypt 1
- }
- error_check_good verify \
- [verify_dir $clientdir "\tRep$tnum.k: " 0 0 1] 0
- replclose $testdir/MSGQUEUEDIR
-}
diff --git a/storage/bdb/test/rep002.tcl b/storage/bdb/test/rep002.tcl
deleted file mode 100644
index 1a3683f2e59..00000000000
--- a/storage/bdb/test/rep002.tcl
+++ /dev/null
@@ -1,323 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2002-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rep002.tcl,v 11.37 2004/09/22 18:01:05 bostic Exp $
-#
-# TEST rep002
-# TEST Basic replication election test.
-# TEST
-# TEST Run a modified version of test001 in a replicated master
-# TEST environment; hold an election among a group of clients to
-# TEST make sure they select a proper master from amongst themselves,
-# TEST in various scenarios.
-
-proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } {
-
- if { [is_record_based $method] == 1 } {
- puts "Rep002: Skipping for method $method."
- return
- }
-
- set logsets [create_logsets [expr $nclients + 1]]
-
- # Run the body of the test with and without recovery.
- set recopts { "" "-recover" }
- foreach r $recopts {
- foreach l $logsets {
- set logindex [lsearch -exact $l "in-memory"]
- if { $r == "-recover" && $logindex != -1 } {
- puts "Skipping test with -recover for in-memory logs."
- continue
- }
- puts "Rep$tnum ($method $r):\
- Replication election test with $nclients clients."
- puts "Rep$tnum: Master logs are [lindex $l 0]"
- for { set i 0 } { $i < $nclients } { incr i } {
- puts "Rep$tnum: Client $i logs are\
- [lindex $l [expr $i + 1]]"
- }
- rep002_sub $method $niter $nclients $tnum $l $r $args
- }
- }
-}
-
-proc rep002_sub { method niter nclients tnum logset recargs largs } {
- source ./include.tcl
- global elect_timeout elect_serial
- global is_windows_test
- set elect_timeout 5000000
-
- env_cleanup $testdir
-
- set qdir $testdir/MSGQUEUEDIR
- replsetup $qdir
-
- set masterdir $testdir/MASTERDIR
- file mkdir $masterdir
- set m_logtype [lindex $logset 0]
- set m_logargs [adjust_logargs $m_logtype]
- set m_txnargs [adjust_txnargs $m_logtype]
-
- for { set i 0 } { $i < $nclients } { incr i } {
- set clientdir($i) $testdir/CLIENTDIR.$i
- file mkdir $clientdir($i)
- set c_logtype($i) [lindex $logset [expr $i + 1]]
- set c_logargs($i) [adjust_logargs $c_logtype($i)]
- set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
- }
-
- # Open a master.
- repladd 1
- set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
- -home $masterdir $m_logargs -errpfx MASTER \
- $m_txnargs -rep_master -rep_transport \[list 1 replsend\]"
-# set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
-# -home $masterdir $m_logargs -errpfx MASTER -errfile /dev/stderr \
-# -verbose {rep on} $m_txnargs -rep_master \
-# -rep_transport \[list 1 replsend\]"
- # In an election test, the -recover arg must not go
- # in the env_cmd string because that is going to be
- # passed to a child process.
- set masterenv [eval $env_cmd(M) $recargs]
- error_check_good master_env [is_valid_env $masterenv] TRUE
-
- # Open the clients.
- for { set i 0 } { $i < $nclients } { incr i } {
- set envid [expr $i + 2]
- repladd $envid
- set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \
- $c_logargs($i) $c_txnargs($i) -rep_client -errpfx CLIENT$i \
- -rep_transport \[list $envid replsend\]"
-# set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \
-# $c_logargs($i) -verbose {rep on} -errfile /dev/stderr \
-# $c_txnargs($i) -rep_client -errpfx CLIENT$i \
-# -rep_transport \[list $envid replsend\]"
- set clientenv($i) [eval $env_cmd($i) $recargs]
- error_check_good \
- client_env($i) [is_valid_env $clientenv($i)] TRUE
- }
-
- # Loop, processing first the master's messages, then the client's,
- # until both queues are empty.
- set envlist {}
- lappend envlist "$masterenv 1"
- for { set i 0 } { $i < $nclients } { incr i } {
- lappend envlist "$clientenv($i) [expr $i + 2]"
- }
- process_msgs $envlist
-
- # Run a modified test001 in the master.
- puts "\tRep$tnum.a: Running test001 in replicated env."
- eval test001 $method $niter 0 0 $tnum -env $masterenv $largs
- process_msgs $envlist
-
- # Verify the database in the client dir.
- for { set i 0 } { $i < $nclients } { incr i } {
- puts "\tRep$tnum.b: Verifying contents of client database $i."
- set testdir [get_home $masterenv]
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- open_and_dump_file test$tnum.db $clientenv($i) $testdir/t1 \
- test001.check dump_file_direction "-first" "-next"
-
- if { [string compare [convert_method $method] -recno] != 0 } {
- filesort $t1 $t3
- }
- error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
-
- verify_dir $clientdir($i) "\tRep$tnum.c: " 0 0 1
- }
-
- # Start an election in the first client.
- puts "\tRep$tnum.d: Starting election with existing master."
- # We want to verify that the master declares the election
- # over by fiat, even if everyone uses a lower priority than 20.
- # Loop and process all messages, keeping track of which
- # sites got a HOLDELECTION and checking that the returned newmaster,
- # if any, is 1 (the master's replication ID).
- set got_hold_elect(M) 0
- for { set i 0 } { $i < $nclients } { incr i } {
- set got_hold_elect($i) 0
- set elect_pipe($i) INVALID
- }
- set elect_pipe(0) [start_election C0 \
- $qdir $env_cmd(0) [expr $nclients + 1] $nclients 20 $elect_timeout]
-
- tclsleep 2
-
- set got_master 0
- while { 1 } {
- set nproced 0
- set he 0
- set nm 0
- set nm2 0
-
- incr nproced [replprocessqueue $masterenv 1 0 he nm]
-
- if { $he == 1 } {
- incr elect_serial
- set elect_pipe(M) [start_election CM $qdir \
- $env_cmd(M) [expr $nclients + 1] $nclients \
- 0 $elect_timeout]
- set got_hold_elect(M) 1
- }
- if { $nm != 0 } {
- error_check_good newmaster_is_master $nm 1
- set got_master $nm
- }
- if { $nm2 != 0 } {
- error_check_good newmaster_is_master $nm2 1
- set got_master $nm2
- }
-
- for { set i 0 } { $i < $nclients } { incr i } {
- set he 0
- set envid [expr $i + 2]
- incr nproced \
- [replprocessqueue $clientenv($i) $envid 0 he nm]
- set child_done [check_election $elect_pipe($i) nm2]
- if { $he == 1 } {
- # error_check_bad client(0)_in_elect $i 0
- if { $elect_pipe($i) != "INVALID" } {
- close_election $elect_pipe($i)
- }
- incr elect_serial
- set pfx CHILD$i.$elect_serial
- set elect_pipe($i) [start_election $pfx $qdir \
- $env_cmd($i) [expr $nclients + 1] \
- $nclients 0 \
- $elect_timeout]
- set got_hold_elect($i) 1
- }
- if { $nm != 0 } {
- error_check_good newmaster_is_master $nm 1
- set got_master $nm
- }
- if { $nm2 != 0 } {
- error_check_good newmaster_is_master $nm2 1
- set got_master $nm2
- }
- }
-
- if { $nproced == 0 } {
- break
- }
- }
- error_check_good got_master $got_master 1
- cleanup_elections
-
- # We need multiple clients to proceed from here.
- if { $nclients < 2 } {
- puts "\tRep$tnum: Skipping for less than two clients."
- error_check_good masterenv_close [$masterenv close] 0
- for { set i 0 } { $i < $nclients } { incr i } {
- error_check_good clientenv_close($i) \
- [$clientenv($i) close] 0
- }
- return
- }
-
- # Make sure all the clients are synced up and ready to be good
- # voting citizens.
- error_check_good master_flush [$masterenv rep_flush] 0
- process_msgs $envlist
-
- # Now hold another election in the first client, this time with
- # a dead master.
- puts "\tRep$tnum.e: Starting election with dead master."
- error_check_good masterenv_close [$masterenv close] 0
- set envlist [lreplace $envlist 0 0]
-
- set m "Rep$tnum.e"
- # We're not going to be using err_cmd, so initialize to "none".
- # Client #1 has priority 100; everyone else has priority 10.
- for { set i 0 } { $i < $nclients } { incr i } {
- set err_cmd($i) "none"
- set crash($i) 0
- if { $i == 1 } {
- set pri($i) 100
- } else {
- set pri($i) 10
- }
- }
- set nsites $nclients
- set nvotes $nclients
- # The elector calls the first election. The expected winner
- # is $win.
- set elector 1
- set win 1
- run_election env_cmd envlist err_cmd pri crash $qdir $m \
- $elector $nsites $nvotes $nclients $win 1 "test$tnum.db"
-
- # Hold an election with two clients at the same (winning) priority.
- # Make sure that the tie gets broken, and that the third client
- # does not win.
- puts "\tRep$tnum.f: Election with two clients at same priority."
- set m "Rep$tnum.f"
- # Clients 0 and 1 have high, matching priority.
- for { set i 0 } { $i < $nclients } { incr i } {
- if { $i >= 2 } {
- set pri($i) 10
- } else {
- set pri($i) 100
- }
- }
-
- # Run several elections.
- set elections 5
- for { set i 0 } { $i < $elections } { incr i } {
- #
- # The expected winner is 0 or 1. Since run_election can only
- # handle one expected winner, catch the result and inspect it.
- #
- set elector 0
- set win 1
- set altwin 0
- if {[catch {eval run_election \
- env_cmd envlist err_cmd pri crash $qdir $m $elector $nsites \
- $nvotes $nclients $win 1 "test$tnum.db"} res]} {
- #
- # If the primary winner didn't win, make sure
- # the alternative winner won. Do all the cleanup
- # for that winner normally done in run_election:
- # open and close the new master, then reopen as a
- # client for the next cycle.
- #
- puts "\t$m: Election $i: Alternate winner $altwin won."
- error_check_good check_winner [is_substr \
- $res "expected 3, got [expr $altwin + 2]"] 1
- error_check_good make_master \
- [$clientenv($altwin) rep_start -master] 0
-
- cleanup_elections
- process_msgs $envlist
-
- error_check_good newmaster_close \
- [$clientenv($altwin) close] 0
- set clientenv($altwin) [eval $env_cmd($altwin)]
- error_check_good cl($altwin) \
- [is_valid_env $clientenv($altwin)] TRUE
- set newelector "$clientenv($altwin) [expr $altwin + 2]"
- set envlist [lreplace $envlist $altwin $altwin $newelector]
- } else {
- puts "\t$m: Election $i: Primary winner $win won."
- }
- process_msgs $envlist
- }
-
- foreach pair $envlist {
- set cenv [lindex $pair 0]
- error_check_good cenv_close [$cenv close] 0
- }
-
- replclose $testdir/MSGQUEUEDIR
-
- # If we're on Windows, we need to forcibly remove some of the
- # files created when the alternate winner won.
- if { $is_windows_test == 1 } {
- set filelist [glob -nocomplain $testdir/CLIENTDIR.$altwin/*]
- fileremove -f $filelist
- }
-}
diff --git a/storage/bdb/test/rep003.tcl b/storage/bdb/test/rep003.tcl
deleted file mode 100644
index 13ef257da40..00000000000
--- a/storage/bdb/test/rep003.tcl
+++ /dev/null
@@ -1,277 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2002-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rep003.tcl,v 11.19 2004/09/22 18:01:05 bostic Exp $
-#
-# TEST rep003
-# TEST Repeated shutdown/restart replication test
-# TEST
-# TEST Run a quick put test in a replicated master environment;
-# TEST start up, shut down, and restart client processes, with
-# TEST and without recovery. To ensure that environment state
-# TEST is transient, use DB_PRIVATE.
-
-proc rep003 { method { tnum "003" } args } {
- source ./include.tcl
- global rep003_dbname rep003_omethod rep003_oargs
-
- if { [is_record_based $method] } {
- puts "Rep$tnum: Skipping for method $method"
- return
- }
-
- set rep003_dbname rep003.db
- set rep003_omethod [convert_method $method]
- set rep003_oargs [convert_args $method $args]
-
- # Run the body of the test with and without recovery. If we're
- # testing in-memory logging, skip the combination of recovery
- # and in-memory logging -- it doesn't make sense.
-
- set logsets [create_logsets 2]
- foreach recopt { "" "-recover" } {
- foreach l $logsets {
- set logindex [lsearch -exact $l "in-memory"]
- if { $recopt == "-recover" && $logindex != -1 } {
- puts "Rep$tnum: Skipping for\
- in-memory logs with -recover."
- continue
- }
- puts "Rep$tnum ($method $recopt):\
- Replication repeated-startup test."
- puts "Rep$tnum: Master logs are [lindex $l 0]"
- puts "Rep$tnum: Client logs are [lindex $l 1]"
- rep003_sub $method $tnum $l $recopt $args
- }
- }
-}
-
-proc rep003_sub { method tnum logset recargs largs } {
- source ./include.tcl
-
- env_cleanup $testdir
-
- replsetup $testdir/MSGQUEUEDIR
-
- set masterdir $testdir/MASTERDIR
- set clientdir $testdir/CLIENTDIR
-
- file mkdir $masterdir
- file mkdir $clientdir
-
- set m_logtype [lindex $logset 0]
- set c_logtype [lindex $logset 1]
-
- # In-memory logs require a large log buffer, and cannot
- # be used with -txn nosync. This test already requires
- # -txn, so adjust the logargs only.
- set m_logargs [adjust_logargs $m_logtype]
- set c_logargs [adjust_logargs $c_logtype]
-
- # Open a master.
- repladd 1
- set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
- -errpfx MASTER -errfile /dev/stderr \
- -home $masterdir -txn $m_logargs -rep_master \
- -rep_transport \[list 1 replsend\]"
- set masterenv [eval $env_cmd(M) $recargs]
- error_check_good master_env [is_valid_env $masterenv] TRUE
-
- puts "\tRep$tnum.a: Simple client startup test."
-
- # Put item one.
- rep003_put $masterenv A1 a-one
-
- # Open a client.
- repladd 2
- set env_cmd(C) "berkdb_env_noerr -create -private -home $clientdir \
- -txn $c_logargs -errpfx CLIENT -errfile /dev/stderr \
- -rep_client -rep_transport \[list 2 replsend\]"
- set clientenv [eval $env_cmd(C) $recargs]
- error_check_good client_env [is_valid_env $clientenv] TRUE
-
- # Put another quick item.
- rep003_put $masterenv A2 a-two
-
- # Loop, processing first the master's messages, then the client's,
- # until both queues are empty.
- set envlist "{$masterenv 1} {$clientenv 2}"
- process_msgs $envlist
-
- rep003_check $clientenv A1 a-one
- rep003_check $clientenv A2 a-two
-
- error_check_good clientenv_close [$clientenv close] 0
- replclear 2
-
- # Now reopen the client after doing another put.
- puts "\tRep$tnum.b: Client restart."
- rep003_put $masterenv B1 b-one
-
- unset clientenv
- set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
- -rep_client -rep_transport [list 2 replsend]]
- error_check_good client_env [is_valid_env $clientenv] TRUE
-
- # Loop letting the client and master sync up and get the
- # environment initialized. It's a new client env so
- # reinitialize the envlist as well.
- set envlist "{$masterenv 1} {$clientenv 2}"
- process_msgs $envlist
-
- # The items from part A should be present at all times--
- # if we roll them back, we've screwed up. [#5709]
- rep003_check $clientenv A1 a-one
- rep003_check $clientenv A2 a-two
-
- rep003_put $masterenv B2 b-two
-
- # Loop, processing first the master's messages, then the client's,
- # until both queues are empty.
- while { 1 } {
- set nproced 0
-
- incr nproced [replprocessqueue $masterenv 1]
- incr nproced [replprocessqueue $clientenv 2]
-
- # The items from part A should be present at all times--
- # if we roll them back, we've screwed up. [#5709]
- rep003_check $clientenv A1 a-one
- rep003_check $clientenv A2 a-two
-
- if { $nproced == 0 } {
- break
- }
- }
-
- rep003_check $clientenv B1 b-one
- rep003_check $clientenv B2 b-two
-
- error_check_good clientenv_close [$clientenv close] 0
-
- replclear 2
-
- # Now reopen the client after a recovery.
- puts "\tRep$tnum.c: Client restart after recovery."
- rep003_put $masterenv C1 c-one
-
- unset clientenv
- set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
- -recover -rep_client -rep_transport [list 2 replsend]]
- error_check_good client_env [is_valid_env $clientenv] TRUE
-
- # Loop, processing first the master's messages, then the client's,
- # until both queues are empty.
- set envlist "{$masterenv 1} {$clientenv 2}"
- process_msgs $envlist
-
- # The items from part A should be present at all times--
- # if we roll them back, we've screwed up. [#5709]
- rep003_check $clientenv A1 a-one
- rep003_check $clientenv A2 a-two
- rep003_check $clientenv B1 b-one
- rep003_check $clientenv B2 b-two
-
- rep003_put $masterenv C2 c-two
-
- # Loop, processing first the master's messages, then the client's,
- # until both queues are empty.
- while { 1 } {
- set nproced 0
-
- # The items from part A should be present at all times--
- # if we roll them back, we've screwed up. [#5709]
- rep003_check $clientenv A1 a-one
- rep003_check $clientenv A2 a-two
- rep003_check $clientenv B1 b-one
- rep003_check $clientenv B2 b-two
-
- incr nproced [replprocessqueue $masterenv 1]
- incr nproced [replprocessqueue $clientenv 2]
-
- if { $nproced == 0 } {
- break
- }
- }
-
- rep003_check $clientenv C1 c-one
- rep003_check $clientenv C2 c-two
-
- error_check_good clientenv_close [$clientenv close] 0
-
- replclear 2
-
- # Now reopen the client after a catastrophic recovery.
- puts "\tRep$tnum.d: Client restart after catastrophic recovery."
- rep003_put $masterenv D1 d-one
-
- unset clientenv
- set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
- -recover_fatal -rep_client -rep_transport [list 2 replsend]]
- error_check_good client_env [is_valid_env $clientenv] TRUE
-
- # Loop, processing first the master's messages, then the client's,
- # until both queues are empty.
- set envlist "{$masterenv 1} {$clientenv 2}"
- process_msgs $envlist
- rep003_put $masterenv D2 d-two
-
- # Loop, processing first the master's messages, then the client's,
- # until both queues are empty.
- while { 1 } {
- set nproced 0
-
- # The items from part A should be present at all times--
- # if we roll them back, we've screwed up. [#5709]
- rep003_check $clientenv A1 a-one
- rep003_check $clientenv A2 a-two
- rep003_check $clientenv B1 b-one
- rep003_check $clientenv B2 b-two
- rep003_check $clientenv C1 c-one
- rep003_check $clientenv C2 c-two
-
- incr nproced [replprocessqueue $masterenv 1]
- incr nproced [replprocessqueue $clientenv 2]
-
- if { $nproced == 0 } {
- break
- }
- }
-
- rep003_check $clientenv D1 d-one
- rep003_check $clientenv D2 d-two
-
- error_check_good clientenv_close [$clientenv close] 0
-
- error_check_good masterenv_close [$masterenv close] 0
- replclose $testdir/MSGQUEUEDIR
-}
-
-proc rep003_put { masterenv key data } {
- global rep003_dbname rep003_omethod rep003_oargs
-
- set db [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \
- $rep003_omethod $rep003_oargs $rep003_dbname]
- error_check_good rep3_put_open($key,$data) [is_valid_db $db] TRUE
-
- set txn [$masterenv txn]
- error_check_good rep3_put($key,$data) [$db put -txn $txn $key $data] 0
- error_check_good rep3_put_txn_commit($key,$data) [$txn commit] 0
-
- error_check_good rep3_put_close($key,$data) [$db close] 0
-}
-
-proc rep003_check { env key data } {
- global rep003_dbname
-
- set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname]
- error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE
-
- set dbt [$db get $key]
- error_check_good rep3_check($key,$data) \
- [lindex [lindex $dbt 0] 1] $data
-
- error_check_good rep3_put_close($key,$data) [$db close] 0
-}
diff --git a/storage/bdb/test/rep004.tcl b/storage/bdb/test/rep004.tcl
deleted file mode 100644
index e1d4d3b65c7..00000000000
--- a/storage/bdb/test/rep004.tcl
+++ /dev/null
@@ -1,198 +0,0 @@
-#
-# Copyright (c) 2002
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rep004.tcl,v 1.5 2002/08/08 18:13:12 sue Exp $
-#
-# TEST rep004
-# TEST Test of DB_REP_LOGSONLY.
-# TEST
-# TEST Run a quick put test in a master environment that has one logs-only
-# TEST client. Shut down, then run catastrophic recovery in the logs-only
-# TEST client and check that the database is present and populated.
-
-proc rep004 { method { nitems 10 } { tnum "04" } args } {
- source ./include.tcl
- global testdir
-
- env_cleanup $testdir
- set dbname rep0$tnum.db
-
- set omethod [convert_method $method]
- set oargs [convert_args $method $args]
-
- puts "Rep0$tnum: Test of logs-only replication clients"
-
- replsetup $testdir/MSGQUEUEDIR
- set masterdir $testdir/MASTERDIR
- file mkdir $masterdir
- set clientdir $testdir/CLIENTDIR
- file mkdir $clientdir
- set logsonlydir $testdir/LOGSONLYDIR
- file mkdir $logsonlydir
-
- # Open a master, a logsonly replica, and a normal client.
- repladd 1
- set masterenv [berkdb_env -create -home $masterdir -txn -rep_master \
- -rep_transport [list 1 replsend]]
- error_check_good master_env [is_valid_env $masterenv] TRUE
-
- repladd 2
- set loenv [berkdb_env -create -home $logsonlydir -txn -rep_logsonly \
- -rep_transport [list 2 replsend]]
- error_check_good logsonly_env [is_valid_env $loenv] TRUE
-
- repladd 3
- set clientenv [berkdb_env -create -home $clientdir -txn -rep_client \
- -rep_transport [list 3 replsend]]
- error_check_good client_env [is_valid_env $clientenv] TRUE
-
-
- puts "\tRep0$tnum.a: Populate database."
-
- set db [eval {berkdb open -create -mode 0644 -auto_commit} \
- -env $masterenv $oargs $omethod $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $nitems } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- set data $str
- } else {
- set key $str
- set data [reverse $str]
- }
- set kvals($count) $key
- set dvals($count) [pad_data $method $data]
-
- set txn [$masterenv txn]
- error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
-
- set ret [eval \
- {$db put} -txn $txn {$key [chop_data $method $data]}]
- error_check_good put($count) $ret 0
-
- error_check_good commit($count) [$txn commit] 0
-
- incr count
- }
-
- puts "\tRep0$tnum.b: Sync up clients."
- set donenow 0
- while { 1 } {
- set nproced 0
-
- incr nproced [replprocessqueue $masterenv 1]
- incr nproced [replprocessqueue $loenv 2]
- incr nproced [replprocessqueue $clientenv 3]
-
- if { $nproced == 0 } {
- break
- }
- }
-
-
- puts "\tRep0$tnum.c: Get master and logs-only client ahead."
- set newcount 0
- while { [gets $did str] != -1 && $newcount < $nitems } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- set data $str
- } else {
- set key $str
- set data [reverse $str]
- }
- set kvals($count) $key
- set dvals($count) [pad_data $method $data]
-
- set txn [$masterenv txn]
- error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
-
- set ret [eval \
- {$db put} -txn $txn {$key [chop_data $method $data]}]
- error_check_good put($count) $ret 0
-
- error_check_good commit($count) [$txn commit] 0
-
- incr count
- incr newcount
- }
-
- error_check_good db_close [$db close] 0
-
- puts "\tRep0$tnum.d: Sync up logs-only client only, then fail over."
- set donenow 0
- while { 1 } {
- set nproced 0
-
- incr nproced [replprocessqueue $masterenv 1]
- incr nproced [replprocessqueue $loenv 2]
-
- if { $nproced == 0 } {
- break
- }
- }
-
-
- # "Crash" the master, and fail over to the upgradeable client.
- error_check_good masterenv_close [$masterenv close] 0
- replclear 3
-
- error_check_good upgrade_client [$clientenv rep_start -master] 0
- set donenow 0
- while { 1 } {
- set nproced 0
-
- incr nproced [replprocessqueue $clientenv 3]
- incr nproced [replprocessqueue $loenv 2]
-
- if { $nproced == 0 } {
- break
- }
- }
-
- error_check_good loenv_close [$loenv close] 0
-
- puts "\tRep0$tnum.e: Run catastrophic recovery on logs-only client."
- set loenv [berkdb_env -create -home $logsonlydir -txn -recover_fatal]
-
- puts "\tRep0$tnum.f: Verify logs-only client contents."
- set lodb [eval {berkdb open} -env $loenv $oargs $omethod $dbname]
- set loc [$lodb cursor]
-
- set cdb [eval {berkdb open} -env $clientenv $oargs $omethod $dbname]
- set cc [$cdb cursor]
-
- # Make sure new master and recovered logs-only replica match.
- for { set cdbt [$cc get -first] } \
- { [llength $cdbt] > 0 } { set cdbt [$cc get -next] } {
- set lodbt [$loc get -next]
-
- error_check_good newmaster_replica_match $cdbt $lodbt
- }
-
- # Reset new master cursor.
- error_check_good cc_close [$cc close] 0
- set cc [$cdb cursor]
-
- for { set lodbt [$loc get -first] } \
- { [llength $lodbt] > 0 } { set lodbt [$loc get -next] } {
- set cdbt [$cc get -next]
-
- error_check_good replica_newmaster_match $lodbt $cdbt
- }
-
- error_check_good loc_close [$loc close] 0
- error_check_good lodb_close [$lodb close] 0
- error_check_good loenv_close [$loenv close] 0
-
- error_check_good cc_close [$cc close] 0
- error_check_good cdb_close [$cdb close] 0
- error_check_good clientenv_close [$clientenv close] 0
-
- close $did
-
- replclose $testdir/MSGQUEUEDIR
-}
diff --git a/storage/bdb/test/rep005.tcl b/storage/bdb/test/rep005.tcl
deleted file mode 100644
index 517fa955d5d..00000000000
--- a/storage/bdb/test/rep005.tcl
+++ /dev/null
@@ -1,307 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2002-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rep005.tcl,v 11.41 2004/10/15 15:41:56 sue Exp $
-#
-# TEST rep005
-# TEST Replication election test with error handling.
-# TEST
-# TEST Run a modified version of test001 in a replicated master environment;
-# TEST hold an election among a group of clients to make sure they select
-# TEST a proper master from amongst themselves, forcing errors at various
-# TEST locations in the election path.
-
-proc rep005 { method args } {
- if { [is_btree $method] == 0 } {
- puts "Rep005: Skipping for method $method."
- return
- }
-
- set tnum "005"
- set niter 10
- set nclients 3
- set logsets [create_logsets [expr $nclients + 1]]
-
- # We don't want to run this with -recover; it takes too
- # long and doesn't cover any new ground.
- set recargs ""
- foreach l $logsets {
- puts "Rep$tnum ($recargs): Replication election\
- error test with $nclients clients."
- puts -nonewline "Rep$tnum: Started at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
- puts "Rep$tnum: Master logs are [lindex $l 0]"
- for { set i 0 } { $i < $nclients } { incr i } {
- puts "Rep$tnum: Client $i logs are\
- [lindex $l [expr $i + 1]]"
- }
- rep005_sub $method $tnum \
- $niter $nclients $l $recargs $args
- }
-}
-
-proc rep005_sub { method tnum niter nclients logset recargs largs } {
- source ./include.tcl
- global rand_init
- error_check_good set_random_seed [berkdb srand $rand_init] 0
-
- env_cleanup $testdir
-
- set qdir $testdir/MSGQUEUEDIR
- replsetup $qdir
-
- set masterdir $testdir/MASTERDIR
- file mkdir $masterdir
- set m_logtype [lindex $logset 0]
- set m_logargs [adjust_logargs $m_logtype]
- set m_txnargs [adjust_txnargs $m_logtype]
-
- for { set i 0 } { $i < $nclients } { incr i } {
- set clientdir($i) $testdir/CLIENTDIR.$i
- file mkdir $clientdir($i)
- set c_logtype($i) [lindex $logset [expr $i + 1]]
- set c_logargs($i) [adjust_logargs $c_logtype($i)]
- set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
- }
-
- # Open a master.
- repladd 1
- set env_cmd(M) "berkdb_env -create -log_max 1000000 \
- -home $masterdir $m_logargs \
- $m_txnargs -rep_master -rep_transport \[list 1 replsend\]"
-# To debug elections, uncomment the line below (and the corresponding
-# lines for the clients further below) to turn on verbose output.
-# Also edit reputils.tcl in proc start_election and swap the 2
-# commented lines with their counterparts.
-# set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
-# -home $masterdir $m_logargs \
-# $m_txnargs -rep_master \
-# -verbose {rep on} -errpfx MASTER -errfile /dev/stderr \
-# -rep_transport \[list 1 replsend\]"
- set masterenv [eval $env_cmd(M) $recargs]
- error_check_good master_env [is_valid_env $masterenv] TRUE
-
- set envlist {}
- lappend envlist "$masterenv 1"
-
- # Open the clients.
- for { set i 0 } { $i < $nclients } { incr i } {
- set envid [expr $i + 2]
- repladd $envid
- set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
- $c_logargs($i) $c_txnargs($i) -rep_client \
- -rep_transport \[list $envid replsend\]"
-# set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \
-# -verbose {rep on} -errpfx CLIENT$i -errfile /dev/stderr \
-# $c_logargs($i) $c_txnargs($i) -rep_client \
-# -rep_transport \[list $envid replsend\]"
- set clientenv($i) [eval $env_cmd($i) $recargs]
- error_check_good \
- client_env($i) [is_valid_env $clientenv($i)] TRUE
- lappend envlist "$clientenv($i) $envid"
- }
-
- # Run a modified test001 in the master.
- puts "\tRep$tnum.a: Running test001 in replicated env."
- eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
-
- # Process all the messages and close the master.
- process_msgs $envlist
- error_check_good masterenv_close [$masterenv close] 0
- set envlist [lreplace $envlist 0 0]
-
- for { set i 0 } { $i < $nclients } { incr i } {
- replclear [expr $i + 2]
- }
- #
- # We set up the error list for each client. We know that the
- # first client is the one calling the election; therefore, we add
- # the error location on sending the message (electsend) for that one.
- set m "Rep$tnum"
- set count 0
- set win -1
- #
- # A full test can take a long time to run. For normal testing
- # pare it down a lot so that it runs in a shorter time.
- #
- set c0err { none electinit none none }
- set c1err $c0err
- set c2err $c0err
- set numtests [expr [llength $c0err] * [llength $c1err] * \
- [llength $c2err]]
- puts "\t$m.b: Starting $numtests elections with error tests"
- set last_win -1
- set win -1
- foreach c0 $c0err {
- foreach c1 $c1err {
- foreach c2 $c2err {
- set elist [list $c0 $c1 $c2]
- rep005_elect env_cmd envlist $qdir \
- $m $count win last_win $elist $logset
- incr count
- }
- }
- }
-
- foreach pair $envlist {
- set cenv [lindex $pair 0]
- error_check_good cenv_close [$cenv close] 0
- }
-
- replclose $testdir/MSGQUEUEDIR
- puts -nonewline \
- "Rep$tnum: Completed at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
-}
-
-proc rep005_elect { ecmd celist qdir msg count \
- winner lsn_lose elist logset} {
- global elect_timeout elect_serial
- global is_windows_test
- upvar $ecmd env_cmd
- upvar $celist envlist
- upvar $winner win
- upvar $lsn_lose last_win
-
- set elect_timeout 5000000
- set nclients [llength $elist]
- set nsites [expr $nclients + 1]
-
- set cl_list {}
- foreach pair $envlist {
- set id [lindex $pair 1]
- set i [expr $id - 2]
- set clientenv($i) [lindex $pair 0]
- set err_cmd($i) [lindex $elist $i]
- set elect_pipe($i) INVALID
- replclear $id
- lappend cl_list $i
- }
-
- # Select winner. We want to test biggest LSN wins, and secondarily
- # highest priority wins. If we already have a master, make sure
- # we don't start a client in that master.
- set el 0
- if { $win == -1 } {
- if { $last_win != -1 } {
- set cl_list [lreplace $cl_list $last_win $last_win]
- set el $last_win
- }
- set windex [berkdb random_int 0 [expr [llength $cl_list] - 1]]
- set win [lindex $cl_list $windex]
- } else {
- # Easy case: if we have a master, the winner must be the
- # same one as last time, so just use $win.
- # If client 0 is the currently existing master, start the
- # election in client 1.
- if {$win == 0} {
- set el 1
- }
- }
- # Winner has priority 100. If we are testing LSN winning, then
- # make sure the lowest LSN client has the highest priority.
- # Everyone else has priority 10.
- for { set i 0 } { $i < $nclients } { incr i } {
- set crash($i) 0
- if { $i == $win } {
- set pri($i) 100
- } elseif { $i == $last_win } {
- set pri($i) 200
- } else {
- set pri($i) 10
- }
- }
-
- puts "\t$msg.b.$count: Start election (win=client$win) $elist"
- set msg $msg.c.$count
- set nsites $nclients
- set nvotes $nsites
- run_election env_cmd envlist err_cmd pri crash \
- $qdir $msg $el $nsites $nvotes $nclients $win
- #
- # Sometimes test elections with an existing master.
- # Other times test elections without master by closing the
- # master we just elected and creating a new client.
- # We want to weight it to close the new master. So, use
- # a list to cause closing about 70% of the time.
- #
- set close_list { 0 0 0 1 1 1 1 1 1 1}
- set close_len [expr [llength $close_list] - 1]
- set close_index [berkdb random_int 0 $close_len]
- if { [lindex $close_list $close_index] == 1 } {
- puts -nonewline "\t\t$msg: Closing "
- error_check_good newmaster_close [$clientenv($win) close] 0
- #
- # If the next test should win via LSN then remove the
- # env before starting the new client so that we
- # can guarantee this client doesn't win the next one.
- set lsn_win { 0 0 0 0 1 1 1 1 1 1 }
- set lsn_len [expr [llength $lsn_win] - 1]
- set lsn_index [berkdb random_int 0 $lsn_len]
- set rec_arg ""
- set win_inmem [expr [string compare [lindex $logset \
- [expr $win + 1]] in-memory] == 0]
- if { [lindex $lsn_win $lsn_index] == 1 } {
- set last_win $win
- set dirindex [lsearch -exact $env_cmd($win) "-home"]
- incr dirindex
- set lsn_dir [lindex $env_cmd($win) $dirindex]
- env_cleanup $lsn_dir
- puts -nonewline "and cleaning "
- } else {
- #
- # If we're not cleaning the env, decide if we should
- # run recovery upon reopening the env. This causes
- # two things:
- # 1. Removal of region files which forces the env
- # to read its __db.rep.egen file.
- # 2. Adding a couple log records, so this client must
- # be the next winner as well since it'll have the
- # biggest LSN.
- #
- set rec_win { 0 0 0 0 0 0 1 1 1 1 }
- set rec_len [expr [llength $rec_win] - 1]
- set rec_index [berkdb random_int 0 $rec_len]
- if { [lindex $rec_win $rec_index] == 1 } {
- puts -nonewline "and recovering "
- set rec_arg "-recover"
- #
- # If we're in memory and about to run
- # recovery, we force ourselves not to win
- # the next election because recovery will
- # blow away the entire log in memory.
- # However, we don't skip this entirely
- # because we still want to force reading
- # of __db.rep.egen.
- #
- if { $win_inmem } {
- set last_win $win
- } else {
- set last_win -1
- }
- } else {
- set last_win -1
- }
- }
- puts "new master, new client $win"
- set clientenv($win) [eval $env_cmd($win) $rec_arg]
- error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE
- #
- # Since we started a new client, we need to replace it
- # in the message processing list so that we get the
- # new Tcl handle name in there.
- set newel "$clientenv($win) [expr $win + 2]"
- set envlist [lreplace $envlist $win $win $newel]
- if { $rec_arg == "" || $win_inmem } {
- set win -1
- }
- #
- # Since we started a new client we want to give them
- # all a chance to process everything outstanding before
- # the election on the next iteration.
- #
- process_msgs $envlist
- }
-}
diff --git a/storage/bdb/test/reputils.tcl b/storage/bdb/test/reputils.tcl
deleted file mode 100644
index f25da575ad1..00000000000
--- a/storage/bdb/test/reputils.tcl
+++ /dev/null
@@ -1,1275 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: reputils.tcl,v 11.84 2004/11/03 18:50:52 carol Exp $
-#
-# Replication testing utilities
-
-# Environment handle for the env containing the replication "communications
-# structure" (really a CDB environment).
-
-# The test environment consists of a queue and a directory (environment)
-# per replication site. The queue is used to hold messages destined for a
-# particular site and the directory will contain the environment for the
-# site. So the environment looks like:
-#	                          $testdir
-#	           _________________|______________________________
-#	          /                 |            \                 \
-#	    MSGQUEUEDIR        MASTERDIR    CLIENTDIR.0   ...   CLIENTDIR.N-1
-#	    |  |  ...  |
-#	    1  2  ...  N+1
-#
-# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message
-# queues 2 - N+1.
-#
-# The globals repenv(master) and repenv(0 .. N-1) contain the environment
-# handles for the sites (i.e., repenv(master) is the master's environment
-# and repenv($i) is client $i's environment).
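-#
-# A hedged sketch of the setup calls that realize this layout (the
-# same pattern appears in the rep00* tests above):
-#	replsetup $testdir/MSGQUEUEDIR		;# message queues
-#	file mkdir $testdir/MASTERDIR		;# site with env id 1
-#	for { set i 0 } { $i < $nclients } { incr i } {
-#		file mkdir $testdir/CLIENTDIR.$i	;# env id = $i + 2
-#	}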
-
-global queueenv
-
-# Array of DB handles, one per machine ID, for the databases that contain
-# messages.
-global queuedbs
-global machids
-global perm_response_list
-set perm_response_list {}
-global perm_sent_list
-set perm_sent_list {}
-global elect_timeout
-set elect_timeout 50000000
-set drop 0
-
-# The default for replication testing is for logs to be on-disk.
-# Mixed-mode log testing provides a mixture of on-disk and
-# in-memory logging, or even all in-memory. In a
-# 1-master/1-client test, we try all four combinations. In a test
-# with more clients, we still try four options, randomly
-# selecting whether the later clients are on-disk or in-memory.
-#
-
-global mixed_mode_logging
-set mixed_mode_logging 0
-
-proc create_logsets { nsites } {
- global mixed_mode_logging
- global logsets
- global rand_init
-
- error_check_good set_random_seed [berkdb srand $rand_init] 0
- if { $mixed_mode_logging == 0 } {
- set loglist {}
- for { set i 0 } { $i < $nsites } { incr i } {
- lappend loglist "on-disk"
- }
- set logsets [list $loglist]
- }
- if { $mixed_mode_logging == 1 } {
- set set1 {on-disk on-disk}
- set set2 {on-disk in-memory}
- set set3 {in-memory on-disk}
- set set4 {in-memory in-memory}
-
- # Start with nsites at 2 since we already set up
- # the master and first client.
- for { set i 2 } { $i < $nsites } { incr i } {
- foreach set { set1 set2 set3 set4 } {
- if { [berkdb random_int 0 1] == 0 } {
- lappend $set "on-disk"
- } else {
- lappend $set "in-memory"
- }
- }
- }
- set logsets [list $set1 $set2 $set3 $set4]
- }
- return $logsets
-}
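-
-# Hedged usage sketch, mirroring how the rep00* tests above consume
-# the result of create_logsets:
-#	set logsets [create_logsets 2]
-#	foreach l $logsets {
-#		puts "Master logs are [lindex $l 0]"
-#		puts "Client logs are [lindex $l 1]"
-#	}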
-
-proc run_mixedmode { method test {display 0} {run 1} \
- {outfile stdout} {largs ""} } {
- global mixed_mode_logging
- set mixed_mode_logging 1
-
- set prefix [string range $test 0 2]
- if { $prefix != "rep" } {
- puts "Skipping mixed-mode log testing for non-rep test."
- set mixed_mode_logging 0
- return
- }
-
- eval run_method $method $test $display $run $outfile $largs
-
- # Reset to default values after run.
- set mixed_mode_logging 0
-}
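-
-# Hedged usage sketch for run_mixedmode ("btree" and "rep001" are
-# illustrative choices; the remaining arguments take their defaults):
-#	run_mixedmode btree rep001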
-
-# Create the directory structure for replication testing.
-# Open the master and client environments; store these in the global repenv
-# Return the master's environment: "-env masterenv"
-proc repl_envsetup { envargs largs test {nclients 1} {droppct 0} { oob 0 } } {
- source ./include.tcl
- global clientdir
- global drop drop_msg
- global masterdir
- global repenv
- global testdir
-
- env_cleanup $testdir
-
- replsetup $testdir/MSGQUEUEDIR
-
- set masterdir $testdir/MASTERDIR
- file mkdir $masterdir
- if { $droppct != 0 } {
- set drop 1
- set drop_msg [expr 100 / $droppct]
- } else {
- set drop 0
- }
-
- for { set i 0 } { $i < $nclients } { incr i } {
- set clientdir($i) $testdir/CLIENTDIR.$i
- file mkdir $clientdir($i)
- }
-
- # Open a master.
- repladd 1
- #
- # Set log smaller than default to force changing files,
- # but big enough so that the tests that use binary files
- # as keys/data can run.
- #
- set logmax [expr 3 * 1024 * 1024]
- set ma_cmd "berkdb_env -create -log_max $logmax $envargs \
- -lock_max 10000 \
- -home $masterdir -txn nosync -rep_master -rep_transport \
- \[list 1 replsend\]"
-# set ma_cmd "berkdb_env_noerr -create -log_max $logmax $envargs \
-# -lock_max 10000 -verbose {rep on} -errfile /dev/stderr \
-# -errpfx $masterdir \
-# -home $masterdir -txn nosync -rep_master -rep_transport \
-# \[list 1 replsend\]"
- set masterenv [eval $ma_cmd]
- error_check_good master_env [is_valid_env $masterenv] TRUE
- set repenv(master) $masterenv
-
- # Open clients
- for { set i 0 } { $i < $nclients } { incr i } {
- set envid [expr $i + 2]
- repladd $envid
- set cl_cmd "berkdb_env -create $envargs -txn nosync \
- -cachesize { 0 10000000 0 } -lock_max 10000 \
- -home $clientdir($i) -rep_client -rep_transport \
- \[list $envid replsend\]"
-# set cl_cmd "berkdb_env_noerr -create $envargs -txn nosync \
-# -cachesize { 0 10000000 0 } -lock_max 10000 \
-# -home $clientdir($i) -rep_client -rep_transport \
-# \[list $envid replsend\] -verbose {rep on} \
-# -errfile /dev/stderr -errpfx $clientdir($i)"
- set clientenv [eval $cl_cmd]
- error_check_good client_env [is_valid_env $clientenv] TRUE
- set repenv($i) $clientenv
- }
- set repenv($i) NULL
- append largs " -env $masterenv "
-
- # Process startup messages
- repl_envprocq $test $nclients $oob
-
- return $largs
-}
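-
-# Hedged usage sketch: a caller captures the augmented largs (it now
-# carries "-env $masterenv") and hands it to the test body, then
-# processes the queues, verifies, and closes up. Illustrative only:
-#	set largs [repl_envsetup $envargs $largs $test $nclients]
-#	eval test001 $method $niter 0 0 $tnum $largs
-#	repl_envprocq $test $nclients
-#	repl_envver0 $test $method $nclients
-#	repl_envclose $test $envargs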
-
-# Process all incoming messages. Iterate until there are no messages left
-# in anyone's queue so that we capture all message exchanges. We verify that
-# the requested number of clients matches the number of client environments
-# we have. The oob parameter indicates if we should process the queue
-# with out-of-order delivery. The replprocessqueue procedure actually does
-# the real work of processing the queue -- this routine simply iterates
-# over the various queues and does the initial setup.
-proc repl_envprocq { test { nclients 1 } { oob 0 }} {
- global repenv
- global drop
-
- set masterenv $repenv(master)
- for { set i 0 } { 1 } { incr i } {
- if { $repenv($i) == "NULL"} {
- break
- }
- }
- error_check_good i_nclients $nclients $i
-
- berkdb debug_check
- puts -nonewline "\t$test: Processing master/$i client queues"
- set rand_skip 0
- if { $oob } {
- puts " out-of-order"
- } else {
- puts " in order"
- }
- set do_check 1
- set droprestore $drop
- while { 1 } {
- set nproced 0
-
- if { $oob } {
- set rand_skip [berkdb random_int 2 10]
- }
- incr nproced [replprocessqueue $masterenv 1 $rand_skip]
- for { set i 0 } { $i < $nclients } { incr i } {
- set envid [expr $i + 2]
- if { $oob } {
- set rand_skip [berkdb random_int 2 10]
- }
- set n [replprocessqueue $repenv($i) \
- $envid $rand_skip]
- incr nproced $n
- }
-
- if { $nproced == 0 } {
- # Now that we delay requesting records until
- # we've had a few records go by, we should always
- # see that the number of requests is lower than the
- # number of messages that were enqueued.
- for { set i 0 } { $i < $nclients } { incr i } {
- set clientenv $repenv($i)
- set queued [stat_field $clientenv rep_stat \
- "Total log records queued"]
- error_check_bad queued_stats \
- $queued -1
- set requested [stat_field $clientenv rep_stat \
- "Log records requested"]
- error_check_bad requested_stats \
- $requested -1
- if { $queued != 0 && $do_check != 0 } {
- error_check_good num_requested \
- [expr $requested <= $queued] 1
- }
-
- $clientenv rep_request 1 1
- }
-
- # If we were dropping messages, we might need
- # to flush the log so that we get everything
- # and end up in the right state.
- if { $drop != 0 } {
- set drop 0
- set do_check 0
- $masterenv rep_flush
- berkdb debug_check
- puts "\t$test: Flushing Master"
- } else {
- break
- }
- }
- }
-
- # Reset the clients back to the default state in case we
- # have more processing to do.
- for { set i 0 } { $i < $nclients } { incr i } {
- set clientenv $repenv($i)
- $clientenv rep_request 4 128
- }
- set drop $droprestore
-}
-
-# Verify that the directories in the master are exactly replicated in
-# each of the client environments.
-proc repl_envver0 { test method { nclients 1 } } {
- global clientdir
- global masterdir
- global repenv
-
- # Verify the database in the client dir.
- # First dump the master.
- set t1 $masterdir/t1
- set t2 $masterdir/t2
- set t3 $masterdir/t3
- set omethod [convert_method $method]
-
- #
- # We are interested in the keys of whatever databases are present
- # in the master environment, so we just call a no-op check function
- # since we have no idea what the contents of this database really is.
- # We just need to walk the master and the clients and make sure they
- # have the same contents.
- #
- set cwd [pwd]
- cd $masterdir
- set stat [catch {glob test*.db} dbs]
- cd $cwd
- if { $stat == 1 } {
- return
- }
- foreach testfile $dbs {
- open_and_dump_file $testfile $repenv(master) $masterdir/t2 \
- repl_noop dump_file_direction "-first" "-next"
-
- if { [string compare [convert_method $method] -recno] != 0 } {
- filesort $t2 $t3
- file rename -force $t3 $t2
- }
- for { set i 0 } { $i < $nclients } { incr i } {
- puts "\t$test: Verifying client $i database $testfile contents."
- open_and_dump_file $testfile $repenv($i) \
- $t1 repl_noop dump_file_direction "-first" "-next"
-
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t1 $t3
- } else {
- catch {file copy -force $t1 $t3} ret
- }
- error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
- }
- }
-}
-
-# Remove all the elements from the master and verify that these
-# deletions properly propagated to the clients.
-proc repl_verdel { test method { nclients 1 } } {
- global clientdir
- global masterdir
- global repenv
-
- # Delete all items in the master.
- set cwd [pwd]
- cd $masterdir
- set stat [catch {glob test*.db} dbs]
- cd $cwd
- if { $stat == 1 } {
- return
- }
- foreach testfile $dbs {
- puts "\t$test: Deleting all items from the master."
- set txn [$repenv(master) txn]
- error_check_good txn_begin [is_valid_txn $txn \
- $repenv(master)] TRUE
- set db [berkdb_open -txn $txn -env $repenv(master) $testfile]
- error_check_good reopen_master [is_valid_db $db] TRUE
- set dbc [$db cursor -txn $txn]
- error_check_good reopen_master_cursor \
- [is_valid_cursor $dbc $db] TRUE
- for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
- { set dbt [$dbc get -next] } {
- error_check_good del_item [$dbc del] 0
- }
- error_check_good dbc_close [$dbc close] 0
- error_check_good txn_commit [$txn commit] 0
- error_check_good db_close [$db close] 0
-
- repl_envprocq $test $nclients
-
- # Check clients.
- for { set i 0 } { $i < $nclients } { incr i } {
- puts "\t$test: Verifying client database $i is empty."
-
- set db [berkdb_open -env $repenv($i) $testfile]
- error_check_good reopen_client($i) \
- [is_valid_db $db] TRUE
- set dbc [$db cursor]
- error_check_good reopen_client_cursor($i) \
- [is_valid_cursor $dbc $db] TRUE
-
- error_check_good client($i)_empty \
- [llength [$dbc get -first]] 0
-
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
- }
- }
-}
-
-# Replication "check" function for the dump procs that expect to
-# be able to verify the keys and data.
-proc repl_noop { k d } {
- return
-}
-
-# Close all the master and client environments in a replication test directory.
-proc repl_envclose { test envargs } {
- source ./include.tcl
- global clientdir
- global encrypt
- global masterdir
- global repenv
- global testdir
-
- if { [lsearch $envargs "-encrypta*"] != -1 } {
- set encrypt 1
- }
-
- # In order to make sure that we have fully-synced and ready-to-verify
- # databases on all the clients, do a checkpoint on the master and
- # process messages in order to flush all the clients.
- set drop 0
- set do_check 0
- berkdb debug_check
- puts "\t$test: Checkpointing master."
- error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0
-
- # Count clients.
- for { set ncli 0 } { 1 } { incr ncli } {
- if { $repenv($ncli) == "NULL" } {
- break
- }
- }
- repl_envprocq $test $ncli
-
- error_check_good masterenv_close [$repenv(master) close] 0
- verify_dir $masterdir "\t$test: " 0 0 1
- for { set i 0 } { $i < $ncli } { incr i } {
- error_check_good client($i)_close [$repenv($i) close] 0
- verify_dir $clientdir($i) "\t$test: " 0 0 1
- }
- replclose $testdir/MSGQUEUEDIR
-
-}
-
-# Close up a replication group
-proc replclose { queuedir } {
- global queueenv queuedbs machids
-
- foreach m $machids {
- set db $queuedbs($m)
- error_check_good dbr_close [$db close] 0
- }
- error_check_good qenv_close [$queueenv close] 0
- set machids {}
-}
-
-# Create a replication group for testing.
-proc replsetup { queuedir } {
- global queueenv queuedbs machids
-
- file mkdir $queuedir
- set queueenv \
- [berkdb_env -create -txn nosync -lock_max 20000 -home $queuedir]
- error_check_good queueenv [is_valid_env $queueenv] TRUE
-
- if { [info exists queuedbs] } {
- unset queuedbs
- }
- set machids {}
-
- return $queueenv
-}
-
-# Send function for replication.
-proc replsend { control rec fromid toid flags lsn } {
- global queuedbs queueenv machids
- global drop drop_msg
- global perm_sent_list
- if { [llength $perm_sent_list] != 0 && $flags == "perm" } {
-# puts "replsend sent perm message, LSN $lsn"
- lappend perm_sent_list $lsn
- }
-
- #
- # If we are testing with dropped messages, then we drop every
- # $drop_msg time. If we do that just return 0 and don't do
- # anything.
- #
- if { $drop != 0 } {
- incr drop
- if { $drop == $drop_msg } {
- set drop 1
- return 0
- }
- }
- # XXX
- # -1 is DB_BROADCAST_MID
- if { $toid == -1 } {
- set machlist $machids
- } else {
- if { [info exists queuedbs($toid)] != 1 } {
- error "replsend: machid $toid not found"
- }
- set machlist [list $toid]
- }
-
- foreach m $machlist {
- # XXX should a broadcast include "self"?
- if { $m == $fromid } {
- continue
- }
-
- set db $queuedbs($m)
- set txn [$queueenv txn]
- $db put -txn $txn -append [list $control $rec $fromid]
- error_check_good replsend_commit [$txn commit] 0
- }
-
- return 0
-}
-
-# Discard all the pending messages for a particular site.
-proc replclear { machid } {
- global queuedbs queueenv
-
- if { [info exists queuedbs($machid)] != 1 } {
- error "FAIL: replclear: machid $machid not found"
- }
-
- set db $queuedbs($machid)
- set txn [$queueenv txn]
- set dbc [$db cursor -txn $txn]
- for { set dbt [$dbc get -rmw -first] } { [llength $dbt] > 0 } \
- { set dbt [$dbc get -rmw -next] } {
- error_check_good replclear($machid)_del [$dbc del] 0
- }
- error_check_good replclear($machid)_dbc_close [$dbc close] 0
- error_check_good replclear($machid)_txn_commit [$txn commit] 0
-}
-
-# Add a machine to a replication environment.
-proc repladd { machid } {
- global queueenv queuedbs machids
-
- if { [info exists queuedbs($machid)] == 1 } {
- error "FAIL: repladd: machid $machid already exists"
- }
-
- set queuedbs($machid) [berkdb open -auto_commit \
- -env $queueenv -create -recno -renumber repqueue$machid.db]
- error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
-
- lappend machids $machid
-}
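-
-# Hedged setup sketch: the canonical sequence used by the tests above
-# is to create the queue env, register a machine id, then open that
-# site's environment with a matching id in -rep_transport:
-#	replsetup $testdir/MSGQUEUEDIR
-#	repladd 1
-#	set masterenv [berkdb_env -create -txn -home $masterdir \
-#	    -rep_master -rep_transport [list 1 replsend]]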
-
-# Acquire a handle to work with an existing machine's replication
-# queue. This is for situations where more than one process
-# is working with a message queue. In general, having more than one
-# process handle the queue is wrong. However, in order to test some
-# things, we need two processes (since Tcl doesn't support threads). We
-# go to great pains in the test harness to make sure this works, but we
-# don't let customers do it.
-proc repljoin { machid } {
- global queueenv queuedbs machids
-
- set queuedbs($machid) [berkdb open -auto_commit \
- -env $queueenv repqueue$machid.db]
- error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
-
- lappend machids $machid
-}
-
-# Process a queue of messages, skipping every "skip_interval" entry.
-# We traverse the entire queue, but since we skip some messages, we
-# may end up leaving things in the queue, which should get picked up
-# on a later run.
-proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \
- { newmasterp NONE } { dupmasterp NONE } { errp NONE } } {
- global queuedbs queueenv errorCode
- global perm_response_list
- global startup_done
-
- # hold_electp is a call-by-reference variable which lets our caller
- # know we need to hold an election.
- if { [string compare $hold_electp NONE] != 0 } {
- upvar $hold_electp hold_elect
- }
- set hold_elect 0
-
- # newmasterp is the same idea, only returning the ID of a master
- # given in a DB_REP_NEWMASTER return.
- if { [string compare $newmasterp NONE] != 0 } {
- upvar $newmasterp newmaster
- }
- set newmaster 0
-
- # dupmasterp is a call-by-reference variable which lets our caller
- # know we have a duplicate master.
- if { [string compare $dupmasterp NONE] != 0 } {
- upvar $dupmasterp dupmaster
- }
- set dupmaster 0
-
- # errp is a call-by-reference variable which lets our caller
- # know we have gotten an error (that they expect).
- if { [string compare $errp NONE] != 0 } {
- upvar $errp errorp
- }
- set errorp 0
-
- set nproced 0
-
- set txn [$queueenv txn]
-
- # If we are running separate processes, the second process has
- # to join an existing message queue.
- if { [info exists queuedbs($machid)] == 0 } {
- repljoin $machid
- }
-
- set dbc [$queuedbs($machid) cursor -txn $txn]
-
- error_check_good process_dbc($machid) \
- [is_valid_cursor $dbc $queuedbs($machid)] TRUE
-
- for { set dbt [$dbc get -first] } \
- { [llength $dbt] != 0 } \
- { } {
- set data [lindex [lindex $dbt 0] 1]
- set recno [lindex [lindex $dbt 0] 0]
-
- # If skip_interval is nonzero, we want to process messages
- # out of order. We do this in a simple but slimy way--
- # continue walking with the cursor without processing the
- # message or deleting it from the queue, but do increment
- # "nproced". The way this proc is normally used, the
- # precise value of nproced doesn't matter--we just don't
- # assume the queues are empty if it's nonzero. Thus,
- # if we contrive to make sure it's nonzero, we'll always
- # come back to records we've skipped on a later call
- # to replprocessqueue. (If there really are no records,
- # we'll never get here.)
- #
- # Skip every skip_interval'th record (and use a remainder other
- # than zero so that we're guaranteed to really process at least
- # one record on every call).
- if { $skip_interval != 0 } {
- if { $nproced % $skip_interval == 1 } {
- incr nproced
- set dbt [$dbc get -next]
- continue
- }
- }
-
- # We need to remove the current message from the queue,
- # because we're about to end the transaction and someone
- # else processing messages might come in and reprocess this
- # message which would be bad.
- error_check_good queue_remove [$dbc del] 0
-
- # We have to play an ugly cursor game here: we currently
- # hold a lock on the page of messages, but rep_process_message
- # might need to lock the page with a different cursor in
- # order to send a response. So save the next recno, close
- # the cursor, and then reopen and reset the cursor.
- # If someone else is processing this queue, our entry might
- # have gone away, and we need to be able to handle that.
-
- error_check_good dbc_process_close [$dbc close] 0
- error_check_good txn_commit [$txn commit] 0
-
- set ret [catch {$dbenv rep_process_message \
- [lindex $data 2] [lindex $data 0] [lindex $data 1]} res]
-
- # Save all ISPERM and NOTPERM responses so we can compare their
- # LSNs to the LSN in the log. The variable perm_response_list
- # holds the entire response so we can extract responses and
- # LSNs as needed.
- #
- if { [llength $perm_response_list] != 0 && \
- ([is_substr $res ISPERM] || [is_substr $res NOTPERM]) } {
- lappend perm_response_list $res
- }
-
- if { $ret != 0 } {
- if { [string compare $errp NONE] != 0 } {
- set errorp "$dbenv $machid $res"
- } else {
- error "FAIL:[timestamp]\
- rep_process_message returned $res"
- }
- }
-
- incr nproced
-
- # Now, re-establish the cursor position. We fetch the
- # current record number. If there is something there,
- # that is the record for the next iteration. If there
- # is nothing there, then we've consumed the last item
- # in the queue.
-
- set txn [$queueenv txn]
- set dbc [$queuedbs($machid) cursor -txn $txn]
- set dbt [$dbc get -set_range $recno]
-
- if { $ret == 0 } {
- set rettype [lindex $res 0]
- set retval [lindex $res 1]
- #
- # Do nothing for 0 and NEWSITE
- #
- if { [is_substr $rettype STARTUPDONE] } {
- set startup_done 1
- }
- if { [is_substr $rettype HOLDELECTION] } {
- set hold_elect 1
- }
- if { [is_substr $rettype DUPMASTER] } {
- set dupmaster "1 $dbenv $machid"
- }
- if { [is_substr $rettype NOTPERM] || \
- [is_substr $rettype ISPERM] } {
- set lsnfile [lindex $retval 0]
- set lsnoff [lindex $retval 1]
- }
- if { [is_substr $rettype NEWMASTER] } {
- set newmaster $retval
- # Break as soon as we get a NEWMASTER message;
- # our caller needs to handle it.
- break
- }
- }
-
- if { $errorp != 0 } {
- # Break also on an error, caller wants to handle it.
- break
- }
- if { $hold_elect == 1 } {
- # Break also on a HOLDELECTION, for the same reason.
- break
- }
- if { $dupmaster == 1 } {
- # Break also on a DUPMASTER, for the same reason.
- break
- }
-
- }
-
- error_check_good dbc_close [$dbc close] 0
- error_check_good txn_commit [$txn commit] 0
-
- # Return the number of messages processed.
- return $nproced
-}
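-
-# A typical draining loop (hypothetical handles): process both sites'
-# queues until no messages move, watching the call-by-reference flags
-# described above.
-#	while { 1 } {
-#		set n [replprocessqueue $masterenv 1 0 he nm]
-#		incr n [replprocessqueue $clientenv 2 0 he nm]
-#		if { $n == 0 } { break }
-#	}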
-
-set run_repl_flag "-run_repl"
-
-proc extract_repl_args { args } {
- global run_repl_flag
-
- for { set arg [lindex $args [set i 0]] } \
- { [string length $arg] > 0 } \
- { set arg [lindex $args [incr i]] } {
- if { [string compare $arg $run_repl_flag] == 0 } {
- return [lindex $args [expr $i + 1]]
- }
- }
- return ""
-}
-
-proc delete_repl_args { args } {
- global run_repl_flag
-
- set ret {}
-
- for { set arg [lindex $args [set i 0]] } \
- { [string length $arg] > 0 } \
- { set arg [lindex $args [incr i]] } {
- if { [string compare $arg $run_repl_flag] != 0 } {
- lappend ret $arg
- } else {
- incr i
- }
- }
- return $ret
-}
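-
-# For example, with run_repl_flag set to "-run_repl" (hypothetical
-# argument lists):
-#	extract_repl_args -run_repl 3 -flag	=> returns 3
-#	delete_repl_args -run_repl 3 -flag	=> returns {-flag}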
-
-global elect_serial
-global elections_in_progress
-set elect_serial 0
-
-# Start an election in a sub-process.
-proc start_election \
- { pfx qdir envstring nsites nvotes pri timeout {err "none"} {crash 0}} {
- source ./include.tcl
- global elect_serial elect_timeout elections_in_progress machids
-
- set filelist {}
- set ret [catch {glob $testdir/ELECTION*.$elect_serial} result]
- if { $ret == 0 } {
- set filelist [concat $filelist $result]
- }
- foreach f $filelist {
- fileremove -f $f
- }
-
- set oid [open $testdir/ELECTION_SOURCE.$elect_serial w]
-
- puts $oid "source $test_path/test.tcl"
- puts $oid "replsetup $qdir"
- foreach i $machids { puts $oid "repladd $i" }
- puts $oid "set env_cmd \{$envstring\}"
-# puts $oid "set dbenv \[eval \$env_cmd -errfile \
-# $testdir/ELECTION_ERRFILE.$elect_serial -errpfx $pfx \]"
- puts $oid "set dbenv \[eval \$env_cmd -errfile \
- /dev/stdout -errpfx $pfx \]"
- puts $oid "\$dbenv test abort $err"
- puts $oid "set res \[catch \{\$dbenv rep_elect $nsites $nvotes $pri \
- $elect_timeout\} ret\]"
- puts $oid "set r \[open \$testdir/ELECTION_RESULT.$elect_serial w\]"
- puts $oid "if \{\$res == 0 \} \{"
- puts $oid "puts \$r \"NEWMASTER \$ret\""
- puts $oid "\} else \{"
- puts $oid "puts \$r \"ERROR \$ret\""
- puts $oid "\}"
- #
- # This loop calls rep_elect a second time with the error cleared.
- # We don't want to do that if we are simulating a crash.
- if { $err != "none" && $crash != 1 } {
- puts $oid "\$dbenv test abort none"
- puts $oid "set res \[catch \{\$dbenv rep_elect $nsites \
- $nvotes $pri $elect_timeout\} ret\]"
- puts $oid "if \{\$res == 0 \} \{"
- puts $oid "puts \$r \"NEWMASTER \$ret\""
- puts $oid "\} else \{"
- puts $oid "puts \$r \"ERROR \$ret\""
- puts $oid "\}"
- }
- puts $oid "close \$r"
- close $oid
-
-# set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w]
- set t [open "|$tclsh_path" w]
- puts $t "source ./include.tcl"
- puts $t "source $testdir/ELECTION_SOURCE.$elect_serial"
- flush $t
-
- set elections_in_progress($elect_serial) $t
- return $elect_serial
-}
-
-proc setpriority { priority nclients winner {start 0} } {
- upvar $priority pri
-
- for { set i $start } { $i < [expr $nclients + $start] } { incr i } {
- if { $i == $winner } {
- set pri($i) 100
- } else {
- set pri($i) 10
- }
- }
-}
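-
-# For example, "setpriority pri 3 1" sets pri(1) to 100 and pri(0) and
-# pri(2) to 10, so client 1 should win an election among clients 0-2.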
-
-# run_election has the following arguments:
-# Arrays:
-# ecmd Array of the commands for setting up each client env.
-# cenv Array of the handles to each client env.
-# errcmd Array of where errors should be forced.
-# priority Array of the priorities of each client env.
-# crash If an error is forced, should we crash or recover?
-# The upvar command takes care of making these arrays available to
-# the procedure.
-#
-# Ordinary variables:
-# qdir Directory where the message queue is located.
-# msg Message prefixed to the output.
-# elector This client calls the first election.
-# nsites Number of sites in the replication group.
-# nvotes Number of votes required to win the election.
-# nclients Number of clients participating in the election.
-# win The expected winner of the election.
-# reopen Should the new master (i.e. winner) be closed
-# and reopened as a client?
-# dbname Name of the underlying database. Defaults to
-# the name of the db created by rep_test.
-#
-proc run_election { ecmd celist errcmd priority crsh qdir msg elector \
- nsites nvotes nclients win {reopen 0} {dbname "test.db"} } {
- global elect_timeout elect_serial
- global is_hp_test
- global is_windows_test
- global rand_init
- upvar $ecmd env_cmd
- upvar $celist cenvlist
- upvar $errcmd err_cmd
- upvar $priority pri
- upvar $crsh crash
-
- set elect_timeout 5000000
-
- foreach pair $cenvlist {
- set id [lindex $pair 1]
- set i [expr $id - 2]
- set elect_pipe($i) INVALID
- replclear $id
- }
-
- #
- # XXX
- # We need to somehow check for the warning if nvotes is not
- # a majority. Problem is that warning will go into the child
- # process' output. Furthermore, we need a mechanism that can
- # handle both sending the output to a file and sending it to
- # /dev/stderr when debugging without failing the
- # error_check_good check.
- #
- puts "\t\t$msg.1: Election with nsites=$nsites,\
- nvotes=$nvotes, nclients=$nclients"
- puts "\t\t$msg.2: First elector is $elector,\
- expected winner is $win (eid [expr $win + 2])"
- incr elect_serial
- set pfx "CHILD$elector.$elect_serial"
- # Windows and HP-UX require a longer timeout.
- if { $is_windows_test == 1 || $is_hp_test == 1 } {
- set elect_timeout [expr $elect_timeout * 3]
- }
- set elect_pipe($elector) [start_election \
- $pfx $qdir $env_cmd($elector) $nsites $nvotes $pri($elector) \
- $elect_timeout $err_cmd($elector) $crash($elector)]
-
- tclsleep 2
-
- set got_newmaster 0
- set tries [expr [expr $elect_timeout * 4] / 1000000]
-
- # If we're simulating a crash, skip the while loop and
- # just give the initial election a chance to complete.
- set crashing 0
- for { set i 0 } { $i < $nclients } { incr i } {
- if { $crash($i) == 1 } {
- set crashing 1
- }
- }
-
- if { $crashing == 1 } {
- tclsleep 10
- } else {
- while { 1 } {
- set nproced 0
- set he 0
- set nm 0
- set nm2 0
-
- foreach pair $cenvlist {
- set he 0
- set envid [lindex $pair 1]
- set i [expr $envid - 2]
- set clientenv($i) [lindex $pair 0]
- set child_done [check_election $elect_pipe($i) nm2]
- if { $got_newmaster == 0 && $nm2 != 0 } {
- error_check_good newmaster_is_master2 $nm2 \
- [expr $win + 2]
- set got_newmaster $nm2
-
- # If this env is the new master, it needs to
- # configure itself as such--this is a different
- # env handle from the one that performed the
- # election.
- if { $nm2 == $envid } {
- error_check_good make_master($i) \
- [$clientenv($i) rep_start -master] \
- 0
- }
- }
- incr nproced \
- [replprocessqueue $clientenv($i) $envid 0 he nm]
-# puts "Tries $tries: Processed queue for client $i, $nproced msgs he $he nm $nm nm2 $nm2"
- if { $he == 1 } {
- #
- # Only close down the election pipe if the
- # previously created one is done and
- # waiting for new commands, otherwise
- # if we try to close it while it's in
- # progress we hang this main tclsh.
- #
- if { $elect_pipe($i) != "INVALID" && \
- $child_done == 1 } {
- close_election $elect_pipe($i)
- set elect_pipe($i) "INVALID"
- }
-# puts "Starting election on client $i"
- if { $elect_pipe($i) == "INVALID" } {
- incr elect_serial
- set pfx "CHILD$i.$elect_serial"
- set elect_pipe($i) [start_election \
- $pfx $qdir \
- $env_cmd($i) $nsites \
- $nvotes $pri($i) $elect_timeout]
- set got_hold_elect($i) 1
- }
- }
- if { $nm != 0 } {
- error_check_good newmaster_is_master $nm \
- [expr $win + 2]
- set got_newmaster $nm
-
- # If this env is the new master, it needs to
- # configure itself as such--this is a different
- # env handle from the one that performed the
- # election.
- if { $nm == $envid } {
- error_check_good make_master($i) \
- [$clientenv($i) rep_start -master] \
- 0
- # Occasionally force new log records
- # to be written.
- set write [berkdb random_int 1 10]
- if { $write == 1 } {
- set db [berkdb_open -env \
- $clientenv($i) \
- -auto_commit $dbname]
- error_check_good dbopen \
- [is_valid_db $db] TRUE
- error_check_good dbclose \
- [$db close] 0
- }
- }
- }
- }
-
- # We need to wait around to make doubly sure that the
- # election has finished...
- if { $nproced == 0 } {
- incr tries -1
- if { $tries == 0 } {
- break
- } else {
- tclsleep 1
- }
- } else {
-			# Progress was made; keep the remaining tries.
- }
- }
-
- # Verify that expected winner is actually the winner.
- error_check_good "client $win wins" $got_newmaster [expr $win + 2]
- }
-
- cleanup_elections
-
- #
- # Make sure we've really processed all the post-election
- # sync-up messages. If we're simulating a crash, don't process
- # any more messages.
- #
- if { $crashing == 0 } {
- process_msgs $cenvlist
- }
-
- if { $reopen == 1 } {
- puts "\t\t$msg.3: Closing new master and reopening as client"
- error_check_good newmaster_close [$clientenv($win) close] 0
-
- set clientenv($win) [eval $env_cmd($win)]
- error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE
- set newelector "$clientenv($win) [expr $win + 2]"
- set cenvlist [lreplace $cenvlist $win $win $newelector]
- if { $crashing == 0 } {
- process_msgs $cenvlist
- }
- }
-}
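-
-# A condensed calling sketch (hypothetical test state); the arrays are
-# passed by name and dereferenced with upvar as documented above:
-#	setpriority pri $nclients $win
-#	run_election env_cmd cenvlist err_cmd pri crash \
-#	    $qdir "Repxxx.a" $elector $nsites $nvotes $nclients $win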
-
-proc got_newmaster { cenv i newmaster win {dbname "test.db"} } {
- upvar $cenv clientenv
-
- # Check that the new master we got is the one we expected.
- error_check_good newmaster_is_master $newmaster [expr $win + 2]
-
- # If this env is the new master, it needs to configure itself
- # as such -- this is a different env handle from the one that
- # performed the election.
-	if { $newmaster == [expr $i + 2] } {
- error_check_good make_master($i) \
- [$clientenv($i) rep_start -master] 0
- # Occasionally force new log records to be written.
- set write [berkdb random_int 1 10]
- if { $write == 1 } {
- set db [berkdb_open -env $clientenv($i) -auto_commit \
- -create -btree $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
- }
- }
-}
-
-proc check_election { id newmasterp } {
- source ./include.tcl
-
- if { $id == "INVALID" } {
- return 0
- }
- upvar $newmasterp newmaster
- set newmaster 0
- set res [catch {open $testdir/ELECTION_RESULT.$id} nmid]
- if { $res != 0 } {
- return 0
- }
- while { [gets $nmid val] != -1 } {
-# puts "result $id: $val"
- set str [lindex $val 0]
- if { [is_substr $str NEWMASTER] } {
- set newmaster [lindex $val 1]
- }
- }
- close $nmid
- return 1
-}
-
-proc close_election { i } {
- global elections_in_progress
- set t $elections_in_progress($i)
- puts $t "replclose \$testdir/MSGQUEUEDIR"
- puts $t "\$dbenv close"
- close $t
- unset elections_in_progress($i)
-}
-
-proc cleanup_elections { } {
- global elect_serial elections_in_progress
-
- for { set i 0 } { $i <= $elect_serial } { incr i } {
- if { [info exists elections_in_progress($i)] != 0 } {
- close_election $i
- }
- }
-
- set elect_serial 0
-}
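-
-# Lifecycle sketch (hypothetical state): start_election forks a child
-# tclsh and returns a serial number, check_election polls that child's
-# result file, and close_election (or cleanup_elections at the end of
-# the test) shuts the pipe down.
-#	set eid [start_election $pfx $qdir $env_cmd($i) \
-#	    $nsites $nvotes $pri($i) $elect_timeout]
-#	while { [check_election $eid nm] == 0 } { tclsleep 1 }
-#	cleanup_elections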
-
-#
-# This is essentially a copy of test001, but it only does the put/get
-# loop AND it takes an already-opened db handle.
-#
-proc rep_test { method env repdb {nentries 10000} \
- {start 0} {skip 0} {needpad 0} args } {
- source ./include.tcl
-
- #
- # Open the db if one isn't given. Close before exit.
- #
- if { $repdb == "NULL" } {
- set testfile "test.db"
- set largs [convert_args $method $args]
- set omethod [convert_method $method]
- set db [eval {berkdb_open_noerr -env $env -auto_commit -create \
- -mode 0644} $largs $omethod $testfile]
- error_check_good reptest_db [is_valid_db $db] TRUE
- } else {
- set db $repdb
- }
-
- #
-	# An env is always supplied here, so testfile is simply the db
-	# name; the cache-size variations of test001 do not apply.
- puts "\t\tRep_test: $method $nentries key/data pairs starting at $start"
- set did [open $dict]
-
- # The "start" variable determines the record number to start
- # with, if we're using record numbers. The "skip" variable
- # determines which dictionary entry to start with. In normal
- # use, skip is equal to start.
-
- if { $skip != 0 } {
- for { set count 0 } { $count < $skip } { incr count } {
- gets $did str
- }
- }
- set pflags ""
- set gflags ""
- set txn ""
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
- puts "\t\tRep_test.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1 + $start]
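-			# 0xffffffff is positive only when Tcl integers
-			# are wider than 32 bits; in that case, wrap keys
-			# past the 32-bit recno limit back into range.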
- if { 0xffffffff > 0 && $key > 0xffffffff } {
- set key [expr $key - 0x100000000]
- }
- if { $key == 0 || $key - 0xffffffff == 1 } {
- incr key
- incr count
- }
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- set str [reverse $str]
- }
- #
- # We want to make sure we send in exactly the same
- # length data so that LSNs match up for some tests
- # in replication (rep021).
- #
- if { [is_fixed_length $method] == 1 && $needpad } {
- #
- # Make it something visible and obvious, 'A'.
- #
- set p 65
- set str [make_fixed_length $method $str $p]
- set kvals($key) $str
- }
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- set ret [eval \
- {$db put} $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- error_check_good txn [$t commit] 0
-
- # Checkpoint 10 times during the run, but not more
- # frequently than every 5 entries.
- set checkfreq [expr $nentries / 10]
- if { $checkfreq < 5 } {
- set checkfreq 5
- }
- if { $count % $checkfreq == 0 } {
- error_check_good txn_checkpoint($count) \
- [$env txn_checkpoint] 0
- }
- incr count
- }
- close $did
- if { $repdb == "NULL" } {
- error_check_good rep_close [$db close] 0
- }
-}
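-
-# Typical use (hypothetical handles): populate a replicated database on
-# the master, either letting rep_test open test.db itself (repdb NULL)
-# or handing it an already-open handle:
-#	rep_test btree $masterenv NULL 1000
-#	rep_test btree $masterenv $repdb 1000 1000 1000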
-
-proc process_msgs { elist {perm_response 0} {dupp NONE} {errp NONE} } {
- if { $perm_response == 1 } {
- global perm_response_list
- set perm_response_list {{}}
- }
-
- if { [string compare $dupp NONE] != 0 } {
- upvar $dupp dupmaster
- set dupmaster 0
- } else {
- set dupmaster NONE
- }
-
- if { [string compare $errp NONE] != 0 } {
- upvar $errp errorp
- set errorp 0
- } else {
- set errorp NONE
- }
-
- while { 1 } {
- set nproced 0
- foreach pair $elist {
- set envname [lindex $pair 0]
- set envid [lindex $pair 1]
- #
- # If we need to send in all the other args
- incr nproced [replprocessqueue $envname $envid \
- 0 NONE NONE dupmaster errorp]
- #
- # If the user is expecting to handle an error and we get
- # one, return the error immediately.
- #
- if { $dupmaster != 0 && $dupmaster != "NONE" } {
- return
- }
- if { $errorp != 0 && $errorp != "NONE" } {
- return
- }
- }
- if { $nproced == 0 } {
- break
- }
- }
-}
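-
-# For example (hypothetical env handles), pair each env with its machine
-# ID and let process_msgs loop until the queues drain:
-#	set envlist "{$masterenv 1} {$clientenv 2}"
-#	process_msgs $envlist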
diff --git a/storage/bdb/test/rpc001.tcl b/storage/bdb/test/rpc001.tcl
deleted file mode 100644
index e07d5dcb162..00000000000
--- a/storage/bdb/test/rpc001.tcl
+++ /dev/null
@@ -1,476 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rpc001.tcl,v 11.41 2004/06/01 19:55:25 carol Exp $
-#
-# TEST rpc001
-# TEST Test RPC server timeouts for cursor, txn and env handles.
-proc rpc001 { } {
- global __debug_on
- global __debug_print
- global errorInfo
- global is_je_test
- global rpc_svc
- source ./include.tcl
-
- #
- # First test timeouts on server.
- #
- set ttime 5
- set itime 10
- puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
- set dpid [rpc_server_start 0 30 -t $ttime -I $itime]
- puts "\tRpc001.a: Started server, pid $dpid"
-
- #
- # Wrap the whole test in a catch statement so we can still kill
- # the rpc server even if the test fails.
- #
- set status [catch {
- tclsleep 2
- remote_cleanup $rpc_server $rpc_testdir $testdir
-
- puts "\tRpc001.b: Creating environment"
-
- set testfile "rpc001.db"
- set home [file tail $rpc_testdir]
-
- set env [eval {berkdb_env -create -mode 0644 -home $home \
- -server $rpc_server -client_timeout 10000 -txn}]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-
- puts "\tRpc001.c: Opening a database"
- #
- # NOTE: the type of database doesn't matter, just use btree.
- set db [eval {berkdb_open -auto_commit -create -btree \
- -mode 0644} -env $env $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set curs_list {}
- set txn_list {}
- puts "\tRpc001.d: Basic timeout test"
- puts "\tRpc001.d1: Starting a transaction"
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
- lappend txn_list $txn
-
- puts "\tRpc001.d2: Open a cursor in that transaction"
- set dbc [$db cursor -txn $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- lappend curs_list $dbc
-
- puts "\tRpc001.d3: Duplicate that cursor"
- set dbc [$dbc dup]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- lappend curs_list $dbc
-
- if { !$is_je_test } {
- puts "\tRpc001.d4: Starting a nested transaction"
- set txn [$env txn -parent $txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
- set txn_list [linsert $txn_list 0 $txn]
- }
-
- puts "\tRpc001.d5: Create a cursor, no transaction"
- set dbc [$db cursor]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- lappend curs_list $dbc
-
- puts "\tRpc001.d6: Timeout cursor and transactions"
- set sleeptime [expr $ttime + 2]
- tclsleep $sleeptime
-
- #
-		# Perform a generic db operation to cause the timeout routine
-		# to trigger.
- #
- set stat [catch {$db stat} ret]
- error_check_good dbstat $stat 0
-
- #
- # Check that every handle we opened above is timed out
- #
- foreach c $curs_list {
- set stat [catch {$c close} ret]
- error_check_good dbc_close:$c $stat 1
- error_check_good dbc_timeout:$c \
- [is_substr $errorInfo "DB_NOSERVER_ID"] 1
- }
- foreach t $txn_list {
- set stat [catch {$t commit} ret]
- error_check_good txn_commit:$t $stat 1
- error_check_good txn_timeout:$t \
- [is_substr $errorInfo "DB_NOSERVER_ID"] 1
- }
-
- set txn_list {}
-
- if { !$is_je_test } {
- set ntxns 8
- puts "\tRpc001.e: Nested ($ntxns x $ntxns) txn activity test"
- puts "\tRpc001.e1: Starting parent transaction"
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
- set txn_list [linsert $txn_list 0 $txn]
- set last_txn $txn
- set parent_txn $txn
-
- #
- # First set a breadth of 'ntxns'
- # We need 2 from this set for testing later on. Just
- # set them up separately first.
- #
- puts "\tRpc001.e2: Creating $ntxns child transactions"
- set child0 [$env txn -parent $parent_txn]
- error_check_good txn_begin \
- [is_valid_txn $child0 $env] TRUE
- set child1 [$env txn -parent $parent_txn]
- error_check_good txn_begin \
- [is_valid_txn $child1 $env] TRUE
-
- for {set i 2} {$i < $ntxns} {incr i} {
- set txn [$env txn -parent $parent_txn]
- error_check_good txn_begin \
- [is_valid_txn $txn $env] TRUE
- set txn_list [linsert $txn_list 0 $txn]
- }
-
- #
- # Now make one 'ntxns' deeply nested.
- # Add one more for testing later on separately.
- #
- puts "\tRpc001.e3: Creating $ntxns nested child transactions"
- for {set i 0} {$i < $ntxns} {incr i} {
- set txn [$env txn -parent $last_txn]
- error_check_good txn_begin \
- [is_valid_txn $txn $env] TRUE
- set txn_list [linsert $txn_list 0 $txn]
- set last_txn $txn
- }
- set last_parent $last_txn
- set last_txn [$env txn -parent $last_parent]
- error_check_good txn_begin \
- [is_valid_txn $last_txn $env] TRUE
-
- puts "\tRpc001.e4: Open a cursor in deepest transaction"
- set dbc [$db cursor -txn $last_txn]
- error_check_good db_cursor \
- [is_valid_cursor $dbc $db] TRUE
-
- puts "\tRpc001.e5: Duplicate that cursor"
- set dbcdup [$dbc dup]
- error_check_good db_cursor \
- [is_valid_cursor $dbcdup $db] TRUE
- lappend curs_list $dbcdup
-
- puts "\tRpc001.f: Timeout then activate duplicate cursor"
- tclsleep $sleeptime
- set stat [catch {$dbcdup close} ret]
- error_check_good dup_close:$dbcdup $stat 0
- error_check_good dup_close:$dbcdup $ret 0
-
- #
- # Make sure that our parent txn is not timed out. We
-		# will try to begin another child txn using the parent.
- # We expect that to succeed. Immediately commit that
- # txn.
- #
- set stat [catch {$env txn -parent $parent_txn} newchild]
- error_check_good newchildtxn $stat 0
- error_check_good newcommit [$newchild commit] 0
-
- puts "\tRpc001.g: Timeout, then activate cursor"
- tclsleep $sleeptime
- set stat [catch {$dbc close} ret]
- error_check_good dbc_close:$dbc $stat 0
- error_check_good dbc_close:$dbc $ret 0
-
- #
- # Make sure that our parent txn is not timed out. We
-		# will try to begin another child txn using the parent.
- # We expect that to succeed. Immediately commit that
- # txn.
- #
- set stat [catch {$env txn -parent $parent_txn} newchild]
- error_check_good newchildtxn $stat 0
- error_check_good newcommit [$newchild commit] 0
-
- puts "\tRpc001.h: Timeout, then activate child txn"
- tclsleep $sleeptime
- set stat [catch {$child0 commit} ret]
- error_check_good child_commit $stat 0
- error_check_good child_commit:$child0 $ret 0
-
- #
- # Make sure that our nested txn is not timed out. We
-		# will try to begin another child txn using the parent.
- # We expect that to succeed. Immediately commit that
- # txn.
- #
- set stat \
- [catch {$env txn -parent $last_parent} newchild]
- error_check_good newchildtxn $stat 0
- error_check_good newcommit [$newchild commit] 0
-
- puts "\tRpc001.i: Timeout, then activate nested txn"
- tclsleep $sleeptime
- set stat [catch {$last_txn commit} ret]
- error_check_good lasttxn_commit $stat 0
-		error_check_good lasttxn_commit:$last_txn $ret 0
-
- #
- # Make sure that our child txn is not timed out. We
- # should be able to commit it.
- #
- set stat [catch {$child1 commit} ret]
- error_check_good child_commit:$child1 $stat 0
- error_check_good child_commit:$child1 $ret 0
-
- #
- # Clean up. They were inserted in LIFO order, so we
- # should just be able to commit them all.
- #
- foreach t $txn_list {
- set stat [catch {$t commit} ret]
- error_check_good txn_commit:$t $stat 0
- error_check_good txn_commit:$t $ret 0
- }
- }
-
- set stat [catch {$db close} ret]
- error_check_good db_close $stat 0
-
- rpc_timeoutjoin $env "Rpc001.j" $sleeptime 0
- rpc_timeoutjoin $env "Rpc001.k" $sleeptime 1
-
- puts "\tRpc001.l: Timeout idle env handle"
- set sleeptime [expr $itime + 2]
- tclsleep $sleeptime
-
- #
- # We need to do another operation to time out the environment
- # handle. Open another environment, with an invalid home
- # directory.
- #
- set stat [catch {eval {berkdb_env_noerr -home "$home.fail" \
- -server $rpc_server}} ret]
- error_check_good env_open $stat 1
-
- set stat [catch {$env close} ret]
- error_check_good env_close $stat 1
- error_check_good env_timeout \
- [is_substr $errorInfo "DB_NOSERVER_ID"] 1
- } res]
- if { $status != 0 } {
- puts $res
- }
- tclkill $dpid
-}
-
-proc rpc_timeoutjoin {env msg sleeptime use_txn} {
- #
- # Check join cursors now.
- #
- puts -nonewline "\t$msg: Test join cursors and timeouts"
- if { $use_txn } {
- puts " (using txns)"
- set txnflag "-auto_commit"
- } else {
- puts " (without txns)"
- set txnflag ""
- }
- #
- # Set up a simple set of join databases
- #
- puts "\t${msg}0: Set up join databases"
- set fruit {
- {blue blueberry}
- {red apple} {red cherry} {red raspberry}
- {yellow lemon} {yellow pear}
- }
- set price {
- {expen blueberry} {expen cherry} {expen raspberry}
- {inexp apple} {inexp lemon} {inexp pear}
- }
- set dessert {
- {blueberry cobbler} {cherry cobbler} {pear cobbler}
- {apple pie} {raspberry pie} {lemon pie}
- }
- set fdb [eval {berkdb_open -create -btree -mode 0644} \
- $txnflag -env $env -dup -dupsort fruit.db]
- error_check_good dbopen [is_valid_db $fdb] TRUE
- set pdb [eval {berkdb_open -create -btree -mode 0644} \
- $txnflag -env $env -dup -dupsort price.db]
- error_check_good dbopen [is_valid_db $pdb] TRUE
- set ddb [eval {berkdb_open -create -btree -mode 0644} \
- $txnflag -env $env -dup -dupsort dessert.db]
- error_check_good dbopen [is_valid_db $ddb] TRUE
- foreach kd $fruit {
- set k [lindex $kd 0]
- set d [lindex $kd 1]
- set ret [eval {$fdb put} $txnflag {$k $d}]
- error_check_good fruit_put $ret 0
- }
- error_check_good sync [$fdb sync] 0
- foreach kd $price {
- set k [lindex $kd 0]
- set d [lindex $kd 1]
- set ret [eval {$pdb put} $txnflag {$k $d}]
- error_check_good price_put $ret 0
- }
- error_check_good sync [$pdb sync] 0
- foreach kd $dessert {
- set k [lindex $kd 0]
- set d [lindex $kd 1]
- set ret [eval {$ddb put} $txnflag {$k $d}]
- error_check_good dessert_put $ret 0
- }
- error_check_good sync [$ddb sync] 0
-
- rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 0
- rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 1
-
- error_check_good ddb:close [$ddb close] 0
- error_check_good pdb:close [$pdb close] 0
- error_check_good fdb:close [$fdb close] 0
- error_check_good ddb:remove [$env dbremove dessert.db] 0
- error_check_good pdb:remove [$env dbremove price.db] 0
- error_check_good fdb:remove [$env dbremove fruit.db] 0
-}
-
-proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
- global errorInfo
- global is_je_test
-
- #
- # Start a parent and child transaction. We'll do our join in
- # the child transaction just to make sure everything gets timed
- # out correctly.
- #
- set curs_list {}
- set txn_list {}
- set msgnum [expr $op * 2 + 1]
- if { $use_txn } {
- puts "\t$msg$msgnum: Set up txns and join cursor"
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
- set txn_list [linsert $txn_list 0 $txn]
- if { !$is_je_test } {
- set child0 [$env txn -parent $txn]
- error_check_good txn_begin \
- [is_valid_txn $child0 $env] TRUE
- set txn_list [linsert $txn_list 0 $child0]
- set child1 [$env txn -parent $txn]
- error_check_good txn_begin \
- [is_valid_txn $child1 $env] TRUE
- set txn_list [linsert $txn_list 0 $child1]
- } else {
- set child0 $txn
- set child1 $txn
- }
- set txncmd "-txn $child0"
- } else {
- puts "\t$msg$msgnum: Set up join cursor"
- set txncmd ""
- }
-
- #
-	# Start a cursor (using txn child0 in the fruit and price dbs,
-	# if needed).  Just pick something simple to join on, then call
-	# join on the dessert db.
- #
- set fkey yellow
- set pkey inexp
- set fdbc [eval $fdb cursor $txncmd]
- error_check_good fdb_cursor [is_valid_cursor $fdbc $fdb] TRUE
- set ret [$fdbc get -set $fkey]
- error_check_bad fget:set [llength $ret] 0
- set k [lindex [lindex $ret 0] 0]
- error_check_good fget:set:key $k $fkey
- set curs_list [linsert $curs_list 0 $fdbc]
-
- set pdbc [eval $pdb cursor $txncmd]
- error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
- set ret [$pdbc get -set $pkey]
- error_check_bad pget:set [llength $ret] 0
- set k [lindex [lindex $ret 0] 0]
- error_check_good pget:set:key $k $pkey
- set curs_list [linsert $curs_list 0 $pdbc]
-
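-	# With fkey yellow and pkey inexp, the join intersects the two
-	# duplicate sets to {lemon pear}, so the first get should return
-	# a dessert record such as {lemon pie}.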
- set jdbc [$ddb join $fdbc $pdbc]
- error_check_good join_cursor [is_valid_cursor $jdbc $ddb] TRUE
- set ret [$jdbc get]
- error_check_bad jget [llength $ret] 0
-
- set msgnum [expr $op * 2 + 2]
- if { $op == 1 } {
- puts -nonewline "\t$msg$msgnum: Timeout all cursors"
- if { $use_txn } {
- puts " and txns"
- } else {
- puts ""
- }
- } else {
- puts "\t$msg$msgnum: Timeout, then activate join cursor"
- }
-
- tclsleep $sleep
-
- if { $op == 1 } {
- #
-		# Perform a generic db operation to cause the timeout routine
-		# to trigger.
- #
- set stat [catch {$fdb stat} ret]
- error_check_good fdbstat $stat 0
-
- #
- # Check that join cursor is timed out.
- #
- set stat [catch {$jdbc close} ret]
- error_check_good dbc_close:$jdbc $stat 1
- error_check_good dbc_timeout:$jdbc \
- [is_substr $errorInfo "DB_NOSERVER_ID"] 1
-
- #
-		# Now the server may or may not time out the constituent
-		# cursors when it times out the join cursor.  So just
-		# sleep again, after which they should time out.
- #
- tclsleep $sleep
- set stat [catch {$fdb stat} ret]
- error_check_good fdbstat $stat 0
-
- foreach c $curs_list {
- set stat [catch {$c close} ret]
- error_check_good dbc_close:$c $stat 1
- error_check_good dbc_timeout:$c \
- [is_substr $errorInfo "DB_NOSERVER_ID"] 1
- }
-
- foreach t $txn_list {
- set stat [catch {$t commit} ret]
- error_check_good txn_commit:$t $stat 1
- error_check_good txn_timeout:$t \
- [is_substr $errorInfo "DB_NOSERVER_ID"] 1
- }
- } else {
- set stat [catch {$jdbc get} ret]
- error_check_good jget.stat $stat 0
- error_check_bad jget [llength $ret] 0
- set curs_list [linsert $curs_list 0 $jdbc]
- foreach c $curs_list {
- set stat [catch {$c close} ret]
- error_check_good dbc_close:$c $stat 0
- error_check_good dbc_close:$c $ret 0
- }
-
- foreach t $txn_list {
- set stat [catch {$t commit} ret]
- error_check_good txn_commit:$t $stat 0
- error_check_good txn_commit:$t $ret 0
- }
- }
-}
diff --git a/storage/bdb/test/rpc002.tcl b/storage/bdb/test/rpc002.tcl
deleted file mode 100644
index 505c23e7be7..00000000000
--- a/storage/bdb/test/rpc002.tcl
+++ /dev/null
@@ -1,161 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rpc002.tcl,v 1.25 2004/07/15 17:19:12 sue Exp $
-#
-# TEST rpc002
-# TEST Test invalid RPC functions and make sure we error them correctly
-# TEST Test server home directory error cases
-proc rpc002 { } {
- global __debug_on
- global __debug_print
- global errorInfo
- global rpc_svc
- source ./include.tcl
-
- set testfile "rpc002.db"
- set home [file tail $rpc_testdir]
- #
- # First start the server.
- #
- puts "Rpc002: Error and Unsupported interface test"
- set dpid [rpc_server_start]
- puts "\tRpc002.a: Started server, pid $dpid"
-
- #
- # Wrap the whole test in a catch statement so we can still
- # kill the rpc server even if the test fails.
- #
- set status [catch {
- tclsleep 2
- remote_cleanup $rpc_server $rpc_testdir $testdir
-
- puts "\tRpc002.b: Unsupported env options"
- #
- # Test each "pre-open" option for env's. These need to be
- # tested on the 'berkdb_env' line.
- #
- set rlist {
- { "-data_dir $rpc_testdir" "Rpc002.b0"}
- { "-log_buffer 512" "Rpc002.b1"}
- { "-log_dir $rpc_testdir" "Rpc002.b2"}
- { "-log_max 100" "Rpc002.b3"}
- { "-lock_conflict {3 {0 0 0 0 0 1 0 1 1}}" "Rpc002.b4"}
- { "-lock_detect default" "Rpc002.b5"}
- { "-lock_max 100" "Rpc002.b6"}
- { "-mpool_mmap_size 100" "Rpc002.b7"}
- { "-shm_key 100" "Rpc002.b9"}
- { "-tmp_dir $rpc_testdir" "Rpc002.b10"}
- { "-txn_max 100" "Rpc002.b11"}
- { "-txn_timestamp 100" "Rpc002.b12"}
- { "-verbose {recovery on}" "Rpc002.b13"}
- }
-
- set e "berkdb_env_noerr -create -mode 0644 -home $home \
- -server $rpc_server -client_timeout 10000 -txn"
-
- foreach pair $rlist {
- set cmd [lindex $pair 0]
- set msg [lindex $pair 1]
- puts "\t$msg: $cmd"
-
- set stat [catch {eval $e $cmd} ret]
- error_check_good $cmd $stat 1
- error_check_good $cmd.err [is_substr $errorInfo \
- "unsupported in RPC environments"] 1
- }
-
- #
- # Open an env with all the subsystems (-txn implies all
- # the rest)
- #
- puts "\tRpc002.c: Unsupported env related interfaces"
- set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
- -server $rpc_server -client_timeout 10000 -txn}]
- error_check_good envopen [is_valid_env $env] TRUE
- set dbcmd "berkdb_open_noerr -create -btree -mode 0644 \
- -env $env $testfile"
- set db [eval $dbcmd]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- #
- # Test each "post-open" option relating to envs, txns, locks,
- # logs and mpools.
- #
- set rlist {
- { " lock_detect default" "Rpc002.c0"}
- { " lock_get read 1 $env" "Rpc002.c1"}
- { " lock_id" "Rpc002.c2"}
- { " lock_stat" "Rpc002.c3"}
- { " lock_vec 1 {get $env read}" "Rpc002.c4"}
- { " log_archive" "Rpc002.c5"}
- { " log_file {0 0}" "Rpc002.c6"}
- { " log_flush" "Rpc002.c7"}
- { " log_cursor" "Rpc002.c8"}
- { " log_stat" "Rpc002.c9"}
- { " mpool -create -pagesize 512" "Rpc002.c10"}
- { " mpool_stat" "Rpc002.c11"}
- { " mpool_sync {0 0}" "Rpc002.c12"}
- { " mpool_trickle 50" "Rpc002.c13"}
- { " txn_checkpoint -min 1" "Rpc002.c14"}
- { " txn_stat" "Rpc002.c15"}
- }
-
- foreach pair $rlist {
- set cmd [lindex $pair 0]
- set msg [lindex $pair 1]
- puts "\t$msg: $cmd"
-
- set stat [catch {eval $env $cmd} ret]
- error_check_good $cmd $stat 1
- error_check_good $cmd.err [is_substr $errorInfo \
- "unsupported in RPC environments"] 1
- }
- error_check_good dbclose [$db close] 0
-
- #
-		# Few of the unsupported database operations can be tested
-		# here, because most are ones Tcl doesn't support either,
-		# so we have no way to get at them.  Test what we can.
- #
- puts "\tRpc002.d: Unsupported database related interfaces"
- #
- # NOTE: the type of database doesn't matter, just use btree.
- #
- puts "\tRpc002.d0: -cachesize"
- set dbcmd "berkdb_open_noerr -create -btree -mode 0644 \
- -env $env -cachesize {0 65536 0} $testfile"
- set stat [catch {eval $dbcmd} ret]
- error_check_good dbopen_cache $stat 1
- error_check_good dbopen_cache_err \
- [is_substr $errorInfo "unsupported in RPC environments"] 1
-
- puts "\tRpc002.d1: Try to upgrade a database"
- #
- # NOTE: the type of database doesn't matter, just use btree.
- set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
- error_check_good dbupgrade $stat 1
- error_check_good dbupgrade_err [is_substr $errorInfo \
- "unsupported in RPC environments"] 1
- error_check_good envclose [$env close] 0
-
- puts "\tRpc002.e: Open env with unsupported home dir"
- set stat [catch {eval {berkdb_env_noerr -create -mode 0644 \
- -home XXX -server $rpc_server -client_timeout 10000 \
- -txn}} ret]
- error_check_good env2open $stat 1
- error_check_good envfail [is_substr $ret "Home unrecognized"] 1
-
- puts "\tRpc002.f: Open env with a NULL home dir"
- set stat [catch {eval {berkdb_env_noerr -create -mode 0644 \
- -server $rpc_server -client_timeout 10000 -txn}} ret]
- error_check_good env2open $stat 1
- error_check_good envfail [is_substr $ret "Home unrecognized"] 1
- } res]
- if { $status != 0 } {
- puts $res
- }
- tclkill $dpid
-}
diff --git a/storage/bdb/test/rpc003.tcl b/storage/bdb/test/rpc003.tcl
deleted file mode 100644
index 890cbc30f5e..00000000000
--- a/storage/bdb/test/rpc003.tcl
+++ /dev/null
@@ -1,184 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rpc003.tcl,v 11.16 2004/03/02 18:44:41 mjc Exp $
-#
-# TEST rpc003
-# TEST Test RPC and secondary indices.
-proc rpc003 { } {
- source ./include.tcl
- global dict nsecondaries
- global rpc_svc
-
- #
- # First set up the files. Secondary indices only work readonly
- # over RPC. So we need to create the databases first without
- # RPC. Then run checking over RPC.
- #
- puts "Rpc003: Secondary indices over RPC"
- if { [string compare $rpc_server "localhost"] != 0 } {
- puts "Cannot run to non-local RPC server. Skipping."
- return
- }
- cleanup $testdir NULL
- puts "\tRpc003.a: Creating local secondary index databases"
-
- # Primary method/args.
- set pmethod btree
- set pomethod [convert_method $pmethod]
- set pargs ""
- set methods {dbtree dbtree}
- set argses [convert_argses $methods ""]
- set omethods [convert_methods $methods]
-
- set nentries 500
-
- puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
- set pname "primary003.db"
- set snamebase "secondary003"
-
- # Open an environment
- # XXX if one is not supplied!
- set env [berkdb_env -create -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- # Open the primary.
- set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
- error_check_good primary_open [is_valid_db $pdb] TRUE
-
- # Open and associate the secondaries
- set sdbs {}
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -env} $env \
- [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
-
- set did [open $dict]
- for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
- if { [is_record_based $pmethod] == 1 } {
- set key [expr $n + 1]
- set datum $str
- } else {
- set key $str
- gets $did datum
- }
- set keys($n) $key
- set data($n) [pad_data $pmethod $datum]
-
- set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
- error_check_good put($n) $ret 0
- }
- close $did
- foreach sdb $sdbs {
- error_check_good secondary_close [$sdb close] 0
- }
- error_check_good primary_close [$pdb close] 0
- error_check_good env_close [$env close] 0
-
- #
- # We have set up our databases, so now start the server and
- # read them over RPC.
- #
- set dpid [rpc_server_start]
- puts "\tRpc003.c: Started server, pid $dpid"
-
- #
- # Wrap the remainder of the test in a catch statement so we
- # can still kill the rpc server even if the test fails.
- #
- set status [catch {
- tclsleep 2
- set home [file tail $rpc_testdir]
- set env [eval {berkdb_env_noerr -create -mode 0644 \
- -home $home -server $rpc_server}]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-
- #
- # Attempt to send in a NULL callback to associate. It will
- # fail if the primary and secondary are not both read-only.
- #
- set msg "\tRpc003.d"
- puts "$msg: Using r/w primary and r/w secondary"
- set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
- set sopen "berkdb_open_noerr -create -env $env \
- [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
- rpc003_assoc_err $popen $sopen $msg
-
- set msg "\tRpc003.e"
- puts "$msg: Using r/w primary and read-only secondary"
- set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
- set sopen "berkdb_open_noerr -env $env -rdonly \
- [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
- rpc003_assoc_err $popen $sopen $msg
-
- set msg "\tRpc003.f"
- puts "$msg: Using read-only primary and r/w secondary"
- set popen "berkdb_open_noerr -env $env \
- $pomethod -rdonly $pargs $pname"
- set sopen "berkdb_open_noerr -create -env $env \
- [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
- rpc003_assoc_err $popen $sopen $msg
-
- # Open and associate the secondaries
- puts "\tRpc003.g: Checking secondaries, both read-only"
- set pdb [eval {berkdb_open_noerr -env} $env \
- -rdonly $pomethod $pargs $pname]
- error_check_good primary_open2 [is_valid_db $pdb] TRUE
-
- set sdbs {}
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -env} $env -rdonly \
- [lindex $omethods $i] [lindex $argses $i] \
- $snamebase.$i.db]
- error_check_good second_open2($i) \
- [is_valid_db $sdb] TRUE
- error_check_good db_associate2($i) \
- [eval {$pdb associate} "" $sdb] 0
- lappend sdbs $sdb
- }
- check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h"
-
- foreach sdb $sdbs {
- error_check_good secondary_close [$sdb close] 0
- }
- error_check_good primary_close [$pdb close] 0
- error_check_good env_close [$env close] 0
- } res]
- if { $status != 0 } {
- puts $res
- }
- tclkill $dpid
-}
-
-proc rpc003_assoc_err { popen sopen msg } {
- global rpc_svc
-
- set pdb [eval $popen]
- error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
-
- puts "$msg.0: NULL callback"
- set sdb [eval $sopen]
- error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
- set stat [catch {eval {$pdb associate} "" $sdb} ret]
- error_check_good db_associate:rdonly $stat 1
- error_check_good db_associate:inval [is_substr $ret invalid] 1
-
- # The Java and JE RPC servers support callbacks.
- if { $rpc_svc == "berkeley_db_svc" || \
- $rpc_svc == "berkeley_db_cxxsvc" } {
- puts "$msg.1: non-NULL callback"
- set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
- error_check_good db_associate:callback $stat 1
- error_check_good db_associate:inval [is_substr $ret invalid] 1
- }
-
- error_check_good assoc_sclose [$sdb close] 0
- error_check_good assoc_pclose [$pdb close] 0
-}
diff --git a/storage/bdb/test/rpc004.tcl b/storage/bdb/test/rpc004.tcl
deleted file mode 100644
index 468e2727076..00000000000
--- a/storage/bdb/test/rpc004.tcl
+++ /dev/null
@@ -1,87 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rpc004.tcl,v 11.12 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST rpc004
-# TEST Test RPC server and security
-proc rpc004 { } {
- global __debug_on
- global __debug_print
- global errorInfo
- global passwd
- global has_crypto
- global rpc_svc
- source ./include.tcl
-
- puts "Rpc004: RPC server + security"
- # Skip test if release does not support encryption.
- if { $has_crypto == 0 } {
- puts "Skipping test rpc004 for non-crypto release."
- return
- }
-
- cleanup $testdir NULL
- set dpid [rpc_server_start 1]
- puts "\tRpc004.a: Started server, pid $dpid"
-
- #
- # Wrap the test in a catch statement so we can still kill
- # the rpc server even if the test fails.
- #
- set status [catch {
- tclsleep 2
- remote_cleanup $rpc_server $rpc_testdir $testdir
-
- puts "\tRpc004.b: Creating environment"
-
- set testfile "rpc004.db"
- set testfile1 "rpc004a.db"
- set home [file tail $rpc_testdir]
-
- set env [eval {berkdb_env -create -mode 0644 -home $home \
- -server $rpc_server -encryptaes $passwd -txn}]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-
- puts "\tRpc004.c: Opening a non-encrypted database"
- #
- # NOTE: the type of database doesn't matter, just use btree.
- set db [eval {berkdb_open -auto_commit -create -btree \
- -mode 0644} -env $env $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\tRpc004.d: Opening an encrypted database"
- set db1 [eval {berkdb_open -auto_commit -create -btree \
- -mode 0644} -env $env -encrypt $testfile1]
- error_check_good dbopen [is_valid_db $db1] TRUE
-
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
- puts "\tRpc004.e: Put/get on both databases"
- set key "key"
- set data "data"
-
- set ret [$db put -txn $txn $key $data]
- error_check_good db_put $ret 0
- set ret [$db get -txn $txn $key]
- error_check_good db_get $ret [list [list $key $data]]
- set ret [$db1 put -txn $txn $key $data]
- error_check_good db1_put $ret 0
- set ret [$db1 get -txn $txn $key]
- error_check_good db1_get $ret [list [list $key $data]]
-
- error_check_good txn_commit [$txn commit] 0
- error_check_good db_close [$db close] 0
- error_check_good db1_close [$db1 close] 0
- error_check_good env_close [$env close] 0
-
- # Cleanup our environment because it's encrypted
- remote_cleanup $rpc_server $rpc_testdir $testdir
- } res]
- if { $status != 0 } {
- puts $res
- }
- tclkill $dpid
-}
diff --git a/storage/bdb/test/rpc005.tcl b/storage/bdb/test/rpc005.tcl
deleted file mode 100644
index 3111f651b16..00000000000
--- a/storage/bdb/test/rpc005.tcl
+++ /dev/null
@@ -1,158 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rpc005.tcl,v 11.11 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST rpc005
-# TEST Test RPC server handle ID sharing
-proc rpc005 { } {
- global __debug_on
- global __debug_print
- global errorInfo
- global rpc_svc
- global is_hp_test
- source ./include.tcl
-
- puts "Rpc005: RPC server handle sharing"
- set dpid [rpc_server_start]
- puts "\tRpc005.a: Started server, pid $dpid"
-
- #
- # Wrap the test in a catch statement so we can still kill
- # the rpc server even if the test fails.
- #
- set status [catch {
- tclsleep 2
- remote_cleanup $rpc_server $rpc_testdir $testdir
- puts "\tRpc005.b: Creating environment"
-
- set testfile "rpc005.db"
- set testfile1 "rpc005a.db"
- set subdb1 "subdb1"
- set subdb2 "subdb2"
- set home [file tail $rpc_testdir]
-
- set env [eval {berkdb_env -create -mode 0644 -home $home \
- -server $rpc_server -txn}]
- error_check_good lock_env:open [is_valid_env $env] TRUE
-
- # You can't open two handles on the same env in
- # HP-UX, so skip this piece.
- if { $is_hp_test == 1 } {
- puts "\tRpc005.c: Skipping for HP-UX."
- } else {
- puts "\tRpc005.c: Compare identical and different \
- configured envs"
- set env_ident [eval {berkdb_env -home $home \
- -server $rpc_server -txn}]
- error_check_good \
- lock_env:open [is_valid_env $env_ident] TRUE
-
- set env_diff [eval {berkdb_env -home $home \
- -server $rpc_server -txn nosync}]
- error_check_good \
- lock_env:open [is_valid_env $env_diff] TRUE
-
- error_check_good \
- ident:id [$env rpcid] [$env_ident rpcid]
- error_check_bad \
- diff:id [$env rpcid] [$env_diff rpcid]
-
- error_check_good envclose [$env_diff close] 0
- error_check_good envclose [$env_ident close] 0
- }
-
- puts "\tRpc005.d: Opening a database"
- set db [eval {berkdb_open -auto_commit -create -btree \
- -mode 0644} -env $env $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\tRpc005.e: Compare identical and different \
- configured dbs"
- set db_ident [eval {berkdb_open -btree} -env $env $testfile]
- error_check_good dbopen [is_valid_db $db_ident] TRUE
-
- set db_diff [eval {berkdb_open -btree} -env $env -rdonly \
- $testfile]
- error_check_good dbopen [is_valid_db $db_diff] TRUE
-
- set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
- $testfile]
- error_check_good dbopen [is_valid_db $db_diff2] TRUE
-
- error_check_good ident:id [$db rpcid] [$db_ident rpcid]
- error_check_bad diff:id [$db rpcid] [$db_diff rpcid]
- error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid]
-
- error_check_good db_close [$db_ident close] 0
- error_check_good db_close [$db_diff close] 0
- error_check_good db_close [$db_diff2 close] 0
- error_check_good db_close [$db close] 0
-
- puts "\tRpc005.f: Compare with a database and subdatabases"
- set db [eval {berkdb_open -auto_commit -create -btree \
- -mode 0644} -env $env $testfile1 $subdb1]
- error_check_good dbopen [is_valid_db $db] TRUE
- set dbid [$db rpcid]
-
- set db2 [eval {berkdb_open -auto_commit -create -btree \
- -mode 0644} -env $env $testfile1 $subdb2]
- error_check_good dbopen [is_valid_db $db2] TRUE
- set db2id [$db2 rpcid]
- error_check_bad 2subdb:id $dbid $db2id
-
- set db_ident [eval {berkdb_open -btree} -env $env \
- $testfile1 $subdb1]
- error_check_good dbopen [is_valid_db $db_ident] TRUE
- set identid [$db_ident rpcid]
-
- set db_ident2 [eval {berkdb_open -btree} -env $env \
- $testfile1 $subdb2]
- error_check_good dbopen [is_valid_db $db_ident2] TRUE
- set ident2id [$db_ident2 rpcid]
-
- set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \
- $testfile1 $subdb1]
- error_check_good dbopen [is_valid_db $db_diff1] TRUE
- set diff1id [$db_diff1 rpcid]
-
- set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
- $testfile1 $subdb2]
- error_check_good dbopen [is_valid_db $db_diff2] TRUE
- set diff2id [$db_diff2 rpcid]
-
- set db_diff [eval {berkdb_open -unknown} -env $env -rdonly \
- $testfile1]
- error_check_good dbopen [is_valid_db $db_diff] TRUE
- set diffid [$db_diff rpcid]
-
- set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \
- $testfile1 $subdb2]
- error_check_good dbopen [is_valid_db $db_diff2a] TRUE
- set diff2aid [$db_diff2a rpcid]
-
- error_check_good ident:id $dbid $identid
- error_check_good ident2:id $db2id $ident2id
- error_check_bad diff:id $dbid $diffid
- error_check_bad diff2:id $db2id $diffid
- error_check_bad diff3:id $diff2id $diffid
- error_check_bad diff4:id $diff1id $diffid
- error_check_good diff2a:id $diff2id $diff2aid
-
- error_check_good db_close [$db_ident close] 0
- error_check_good db_close [$db_ident2 close] 0
- error_check_good db_close [$db_diff close] 0
- error_check_good db_close [$db_diff1 close] 0
- error_check_good db_close [$db_diff2 close] 0
- error_check_good db_close [$db_diff2a close] 0
- error_check_good db_close [$db2 close] 0
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
- } res]
- if { $status != 0 } {
- puts $res
- }
- tclkill $dpid
-}
diff --git a/storage/bdb/test/rsrc001.tcl b/storage/bdb/test/rsrc001.tcl
deleted file mode 100644
index b44d5ebd642..00000000000
--- a/storage/bdb/test/rsrc001.tcl
+++ /dev/null
@@ -1,221 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rsrc001.tcl,v 11.25 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST rsrc001
-# TEST Recno backing file test. Try different patterns of adding
-# TEST records and making sure that the corresponding file matches.
-proc rsrc001 { } {
- source ./include.tcl
-
- puts "Rsrc001: Basic recno backing file writeback tests"
-
- # We run this test essentially twice, once with a db file
- # and once without (an in-memory database).
- set rec1 "This is record 1"
- set rec2 "This is record 2 This is record 2"
- set rec3 "This is record 3 This is record 3 This is record 3"
- set rec4 [replicate "This is record 4 " 512]
-
- foreach testfile { "$testdir/rsrc001.db" "" } {
-
- cleanup $testdir NULL
-
- if { $testfile == "" } {
- puts "Rsrc001: Testing with in-memory database."
- } else {
- puts "Rsrc001: Testing with disk-backed database."
- }
-
- # Create backing file for the empty-file test.
- set oid1 [open $testdir/rsrc.txt w]
- close $oid1
-
- puts "\tRsrc001.a: Put to empty file."
- set db [eval {berkdb_open -create -mode 0644\
- -recno -source $testdir/rsrc.txt} $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set txn ""
-
- set ret [eval {$db put} $txn {1 $rec1}]
- error_check_good put_to_empty $ret 0
- error_check_good db_close [$db close] 0
-
- # Now fill out the backing file and create the check file.
- set oid1 [open $testdir/rsrc.txt a]
- set oid2 [open $testdir/check.txt w]
-
- # This one was already put into rsrc.txt.
- puts $oid2 $rec1
-
- # These weren't.
- puts $oid1 $rec2
- puts $oid2 $rec2
- puts $oid1 $rec3
- puts $oid2 $rec3
- puts $oid1 $rec4
- puts $oid2 $rec4
- close $oid1
- close $oid2
-
- puts -nonewline "\tRsrc001.b: Read file, rewrite last record;"
- puts " write it out and diff"
- set db [eval {berkdb_open -create -mode 0644\
- -recno -source $testdir/rsrc.txt} $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Read the last record; replace it (but we won't change it).
- # Then close the file and diff the two files.
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- set rec [$dbc get -last]
- error_check_good get_last [llength [lindex $rec 0]] 2
- set key [lindex [lindex $rec 0] 0]
- set data [lindex [lindex $rec 0] 1]
-
- # Get the last record from the text file
- set oid [open $testdir/rsrc.txt]
- set laststr ""
- while { [gets $oid str] != -1 } {
- set laststr $str
- }
- close $oid
- set data [sanitize_record $data]
- error_check_good getlast $data $laststr
-
- set ret [eval {$db put} $txn {$key $data}]
- error_check_good replace_last $ret 0
-
- error_check_good curs_close [$dbc close] 0
- error_check_good db_sync [$db sync] 0
- error_check_good db_sync [$db sync] 0
- error_check_good \
- Rsrc001:diff($testdir/rsrc.txt,$testdir/check.txt) \
- [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
-
- puts -nonewline "\tRsrc001.c: "
- puts "Append some records in tree and verify in file."
- set oid [open $testdir/check.txt a]
- for {set i 1} {$i < 10} {incr i} {
- set rec [replicate "New Record $i" $i]
- puts $oid $rec
- incr key
- set ret [eval {$db put} $txn {-append $rec}]
- error_check_good put_append $ret $key
- }
- error_check_good db_sync [$db sync] 0
- error_check_good db_sync [$db sync] 0
- close $oid
- set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
- error_check_good \
- Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
-
- puts "\tRsrc001.d: Append by record number"
- set oid [open $testdir/check.txt a]
- for {set i 1} {$i < 10} {incr i} {
- set rec [replicate "New Record (set 2) $i" $i]
- puts $oid $rec
- incr key
- set ret [eval {$db put} $txn {$key $rec}]
- error_check_good put_byno $ret 0
- }
-
- error_check_good db_sync [$db sync] 0
- error_check_good db_sync [$db sync] 0
- close $oid
- set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
- error_check_good \
- Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
-
- puts "\tRsrc001.e: Put beyond end of file."
- set oid [open $testdir/check.txt a]
- for {set i 1} {$i < 10} {incr i} {
- puts $oid ""
- incr key
- }
- set rec "Last Record"
- puts $oid $rec
- incr key
-
- set ret [eval {$db put} $txn {$key $rec}]
- error_check_good put_byno $ret 0
-
- puts "\tRsrc001.f: Put beyond end of file, after reopen."
-
- error_check_good db_close [$db close] 0
- set db [eval {berkdb_open -create -mode 0644\
- -recno -source $testdir/rsrc.txt} $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set rec "Last record with reopen"
- puts $oid $rec
-
- incr key
- set ret [eval {$db put} $txn {$key $rec}]
- error_check_good put_byno_with_reopen $ret 0
-
- puts "\tRsrc001.g:\
- Put several beyond end of file, after reopen with snapshot."
- error_check_good db_close [$db close] 0
- set db [eval {berkdb_open -create -mode 0644\
- -snapshot -recno -source $testdir/rsrc.txt} $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set rec "Really really last record with reopen"
- puts $oid ""
- puts $oid ""
- puts $oid ""
- puts $oid $rec
-
- incr key
- incr key
- incr key
- incr key
-
- set ret [eval {$db put} $txn {$key $rec}]
- error_check_good put_byno_with_reopen $ret 0
-
- error_check_good db_sync [$db sync] 0
- error_check_good db_sync [$db sync] 0
-
- close $oid
- set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
- error_check_good \
- Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
-
- puts "\tRsrc001.h: Verify proper syncing of changes on close."
- error_check_good Rsrc001:db_close [$db close] 0
- set db [eval {berkdb_open -create -mode 0644 -recno \
- -source $testdir/rsrc.txt} $testfile]
- set oid [open $testdir/check.txt a]
- for {set i 1} {$i < 10} {incr i} {
- set rec [replicate "New Record $i" $i]
- puts $oid $rec
- set ret [eval {$db put} $txn {-append $rec}]
- # Don't bother checking return; we don't know what
- # the key number is, and we'll pick up a failure
- # when we compare.
- }
- error_check_good Rsrc001:db_close [$db close] 0
- close $oid
- set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
- error_check_good Rsrc001:diff($testdir/{rsrc,check}.txt) $ret 0
- }
-}
-
-# Strip CRs from a record.
-# Needed on Windows when a file is created as text (with CR/LF)
-# but read as binary (where CR is read as a separate character)
-proc sanitize_record { rec } {
- source ./include.tcl
-
- if { $is_windows_test != 1 } {
- return $rec
- }
- regsub -all \15 $rec "" data
- return $data
-}
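
The mechanism rsrc001 exercises is the recno "backing file": with -source, the flat text file is the authoritative record store, and $db sync (or close) writes tree changes back out to it. A minimal sketch of that round trip, assuming the suite's berkdb Tcl bindings and helpers are loaded, with illustrative file names:

    # Create a three-line backing file.
    set fid [open /tmp/rsrc.txt w]
    puts $fid "line one"
    puts $fid "line two"
    puts $fid "line three"
    close $fid

    # Open a recno database on top of it; lines become records 1..3.
    set db [berkdb_open -create -mode 0644 -recno \
        -source /tmp/rsrc.txt rsrc.db]

    # Overwrite record 2, then flush: the edit lands in the text file.
    $db put 2 "line two, rewritten"
    error_check_good db_sync [$db sync] 0
    $db close

The -append puts in Rsrc001.c work the same way, except the database assigns the next record number and returns it.
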
diff --git a/storage/bdb/test/rsrc002.tcl b/storage/bdb/test/rsrc002.tcl
deleted file mode 100644
index 808159a01e0..00000000000
--- a/storage/bdb/test/rsrc002.tcl
+++ /dev/null
@@ -1,66 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rsrc002.tcl,v 11.16 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST rsrc002
-# TEST Recno backing file test #2: test of set_re_delim. Specify a backing
-# TEST file with colon-delimited records, and make sure they are correctly
-# TEST interpreted.
-proc rsrc002 { } {
- source ./include.tcl
-
- puts "Rsrc002: Alternate variable-length record delimiters."
-
- # We run this test essentially twice, once with a db file
- # and once without (an in-memory database).
- foreach testfile { "$testdir/rsrc002.db" "" } {
-
- cleanup $testdir NULL
-
- # Create the starting files
- set oid1 [open $testdir/rsrc.txt w]
- set oid2 [open $testdir/check.txt w]
- puts -nonewline $oid1 "ostrich:emu:kiwi:moa:cassowary:rhea:"
- puts -nonewline $oid2 "ostrich:emu:kiwi:penguin:cassowary:rhea:"
- close $oid1
- close $oid2
-
- if { $testfile == "" } {
- puts "Rsrc002: Testing with in-memory database."
- } else {
- puts "Rsrc002: Testing with disk-backed database."
- }
-
- puts "\tRsrc002.a: Read file, verify correctness."
- set db [eval {berkdb_open -create -mode 0644 -delim 58 \
- -recno -source $testdir/rsrc.txt} $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
-	# Read the first two records to verify parsing, then overwrite
-	# record 4 (moa -> penguin) so the source file will match the
-	# check file.  Then close the database and diff the two files.
- set txn ""
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- set rec [$dbc get -first]
- error_check_good get_first $rec [list [list 1 "ostrich"]]
- set rec [$dbc get -next]
- error_check_good get_next $rec [list [list 2 "emu"]]
-
- puts "\tRsrc002.b: Write record, verify correctness."
-
- eval {$dbc get -set 4}
- set ret [$dbc put -current "penguin"]
- error_check_good dbc_put $ret 0
-
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
-
- error_check_good \
- Rsrc002:diff($testdir/rsrc.txt,$testdir/check.txt) \
- [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
- }
-}
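
The working part of rsrc002 is -delim 58: 58 is the ASCII code for ':', so set_re_delim makes the recno layer split the backing file on colons rather than newlines. A short sketch under the same assumptions (berkdb bindings loaded, throwaway paths):

    set fid [open /tmp/birds.txt w]
    puts -nonewline $fid "ostrich:emu:kiwi:moa:"
    close $fid

    # 58 == ':'; each colon-terminated field is one record.
    set db [berkdb_open -create -mode 0644 -delim 58 \
        -recno -source /tmp/birds.txt birds.db]
    set dbc [$db cursor]
    puts [$dbc get -first]    ;# {{1 ostrich}}
    puts [$dbc get -set 4]    ;# {{4 moa}}
    error_check_good dbc_close [$dbc close] 0
    error_check_good db_close [$db close] 0
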
diff --git a/storage/bdb/test/rsrc003.tcl b/storage/bdb/test/rsrc003.tcl
deleted file mode 100644
index b62a6bcb15e..00000000000
--- a/storage/bdb/test/rsrc003.tcl
+++ /dev/null
@@ -1,173 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rsrc003.tcl,v 11.7 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST rsrc003
-# TEST Recno backing file test. Try different patterns of adding
-# TEST records and making sure that the corresponding file matches.
-proc rsrc003 { } {
- source ./include.tcl
- global fixed_len
-
- puts "Rsrc003: Basic recno backing file writeback tests fixed length"
-
- # We run this test essentially twice, once with a db file
- # and once without (an in-memory database).
- #
- # Then run with big fixed-length records
- set rec1 "This is record 1"
- set rec2 "This is record 2"
- set rec3 "This is record 3"
- set bigrec1 [replicate "This is record 1 " 512]
- set bigrec2 [replicate "This is record 2 " 512]
- set bigrec3 [replicate "This is record 3 " 512]
-
- set orig_fixed_len $fixed_len
- set rlist {
- {{$rec1 $rec2 $rec3} "small records" }
- {{$bigrec1 $bigrec2 $bigrec3} "large records" }}
-
- foreach testfile { "$testdir/rsrc003.db" "" } {
-
- foreach rec $rlist {
- cleanup $testdir NULL
-
- set recs [lindex $rec 0]
- set msg [lindex $rec 1]
- # Create the starting files
- # Note that for the rest of the test, we are going
- # to append a LF when we 'put' via DB to maintain
- # file structure and allow us to use 'gets'.
- set oid1 [open $testdir/rsrc.txt w]
- set oid2 [open $testdir/check.txt w]
- foreach record $recs {
- set r [subst $record]
- set fixed_len [string length $r]
- puts $oid1 $r
- puts $oid2 $r
- }
- close $oid1
- close $oid2
-
- set reclen [expr $fixed_len + 1]
- if { $reclen > [string length $rec1] } {
- set repl 512
- } else {
- set repl 2
- }
- if { $testfile == "" } {
- puts \
-"Rsrc003: Testing with in-memory database with $msg."
- } else {
- puts \
-"Rsrc003: Testing with disk-backed database with $msg."
- }
-
- puts -nonewline \
- "\tRsrc003.a: Read file, rewrite last record;"
- puts " write it out and diff"
- set db [eval {berkdb_open -create -mode 0644 -recno \
- -len $reclen -source $testdir/rsrc.txt} $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Read the last record; replace it (don't change it).
- # Then close the file and diff the two files.
- set txn ""
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor \
- [is_valid_cursor $dbc $db] TRUE
-
- set rec [$dbc get -last]
- error_check_good get_last [llength [lindex $rec 0]] 2
- set key [lindex [lindex $rec 0] 0]
- set data [lindex [lindex $rec 0] 1]
-
- # Get the last record from the text file
- set oid [open $testdir/rsrc.txt]
- set laststr ""
- while { [gets $oid str] != -1 } {
- append str \12
- set laststr $str
- }
- close $oid
- set data [sanitize_record $data]
- error_check_good getlast $data $laststr
-
- set ret [eval {$db put} $txn {$key $data}]
- error_check_good replace_last $ret 0
-
- error_check_good curs_close [$dbc close] 0
- error_check_good db_sync [$db sync] 0
- error_check_good db_sync [$db sync] 0
- error_check_good \
- diff1($testdir/rsrc.txt,$testdir/check.txt) \
- [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
-
- puts -nonewline "\tRsrc003.b: "
- puts "Append some records in tree and verify in file."
- set oid [open $testdir/check.txt a]
- for {set i 1} {$i < 10} {incr i} {
- set rec [chop_data -frecno [replicate \
- "This is New Record $i" $repl]]
- puts $oid $rec
- append rec \12
- incr key
- set ret [eval {$db put} $txn {-append $rec}]
- error_check_good put_append $ret $key
- }
- error_check_good db_sync [$db sync] 0
- error_check_good db_sync [$db sync] 0
- close $oid
- set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
- error_check_good \
- diff2($testdir/{rsrc.txt,check.txt}) $ret 0
-
- puts "\tRsrc003.c: Append by record number"
- set oid [open $testdir/check.txt a]
- for {set i 1} {$i < 10} {incr i} {
- set rec [chop_data -frecno [replicate \
- "New Record (set 2) $i" $repl]]
- puts $oid $rec
- append rec \12
- incr key
- set ret [eval {$db put} $txn {$key $rec}]
- error_check_good put_byno $ret 0
- }
-
- error_check_good db_sync [$db sync] 0
- error_check_good db_sync [$db sync] 0
- close $oid
- set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
- error_check_good \
- diff3($testdir/{rsrc.txt,check.txt}) $ret 0
-
- puts \
-"\tRsrc003.d: Verify proper syncing of changes on close."
- error_check_good Rsrc003:db_close [$db close] 0
- set db [eval {berkdb_open -create -mode 0644 -recno \
- -len $reclen -source $testdir/rsrc.txt} $testfile]
- set oid [open $testdir/check.txt a]
- for {set i 1} {$i < 10} {incr i} {
- set rec [chop_data -frecno [replicate \
- "New Record (set 3) $i" $repl]]
- puts $oid $rec
- append rec \12
- set ret [eval {$db put} $txn {-append $rec}]
-				# Don't bother checking return; we don't know what
-				# the key number is, and we'll pick up a failure
-				# when we compare.
- }
- error_check_good Rsrc003:db_close [$db close] 0
- close $oid
- set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
- error_check_good \
- diff5($testdir/{rsrc,check}.txt) $ret 0
- }
- }
- set fixed_len $orig_fixed_len
- return
-}
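
A detail worth calling out in rsrc003: the test appends \12 (LF) to every record it puts and opens the database with -len equal to the payload length plus one, so the newline is stored inside each fixed-length record and the backing file keeps its line structure for 'gets'. That bookkeeping, sketched with illustrative names:

    set payload "This is record 1"
    # Fixed record length = payload plus the embedded LF.
    set reclen [expr {[string length $payload] + 1}]

    set fid [open /tmp/rsrc.txt w]
    puts $fid $payload
    close $fid

    set db [berkdb_open -create -mode 0644 -recno \
        -len $reclen -source /tmp/rsrc.txt rsrc3.db]
    set rec "This is record 2"
    append rec \12
    $db put 2 $rec
    error_check_good db_sync [$db sync] 0
    $db close
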
diff --git a/storage/bdb/test/rsrc004.tcl b/storage/bdb/test/rsrc004.tcl
deleted file mode 100644
index 20f11372a17..00000000000
--- a/storage/bdb/test/rsrc004.tcl
+++ /dev/null
@@ -1,52 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: rsrc004.tcl,v 11.5 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST rsrc004
-# TEST Recno backing file test for EOF-terminated records.
-proc rsrc004 { } {
- source ./include.tcl
-
- foreach isfixed { 0 1 } {
- cleanup $testdir NULL
-
- # Create the backing text file.
- set oid1 [open $testdir/rsrc.txt w]
- if { $isfixed == 1 } {
- puts -nonewline $oid1 "record 1xxx"
- puts -nonewline $oid1 "record 2xxx"
- } else {
- puts $oid1 "record 1xxx"
- puts $oid1 "record 2xxx"
- }
- puts -nonewline $oid1 "record 3"
- close $oid1
-
- set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
- if { $isfixed == 1 } {
- append args " -len [string length "record 1xxx"]"
-		set match "record 3   "
- puts "Rsrc004: EOF-terminated recs: fixed length"
- } else {
- puts "Rsrc004: EOF-terminated recs: variable length"
- set match "record 3"
- }
-
- puts "\tRsrc004.a: Read file, verify correctness."
- set db [eval berkdb_open $args "$testdir/rsrc004.db"]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Read the last record
- set dbc [eval {$db cursor} ""]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- set rec [$dbc get -last]
- error_check_good get_last $rec [list [list 3 $match]]
-
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
- }
-}
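
What rsrc004 pins down is the treatment of a final record with no terminator: in variable-length mode it comes back exactly as written ("record 3"), while in fixed-length mode it is space-padded out to -len. A sketch of the fixed-length case, under the same assumptions as above:

    # Two fixed-length records; the second is short and unterminated.
    set fid [open /tmp/eof.txt w]
    puts -nonewline $fid "record 1xxx"
    puts -nonewline $fid "record 3"
    close $fid

    # -len 11 reads the file as consecutive 11-byte records; the
    # 8-byte tail at EOF is padded with spaces to full length.
    set db [berkdb_open -create -mode 0644 -recno \
        -len 11 -source /tmp/eof.txt eof.db]
    set dbc [$db cursor]
    puts [$dbc get -last]    ;# {{2 {record 3   }}}
    $dbc close
    $db close
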
diff --git a/storage/bdb/test/scr001/chk.code b/storage/bdb/test/scr001/chk.code
deleted file mode 100644
index 159524d27e1..00000000000
--- a/storage/bdb/test/scr001/chk.code
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.code,v 1.14 2004/11/08 14:45:07 bostic Exp $
-#
-# Check to make sure that the code samples in the documents build.
-
-d=../../../db_docs
-
-[ -d $d ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
- echo 'FAIL: unable to find or build libdb.a'
- exit 1
-}
-
-exitv=0
-for i in `find $d -name '*.cs'`; do
- echo " compiling $i"
- sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
- -e 's/m4_[a-z]*[(\[)]*//g' \
- -e 's/(\[//g' \
- -e '/argv/!s/])//g' \
- -e 's/dnl//g' \
- -e 's/__GT__/>/g' \
- -e 's/__LB__/[/g' \
- -e 's/__LT__/</g' \
- -e 's/__RB__/]/g' < $i > t.c
- if cc -pthread -Wall -Werror -I.. t.c ../libdb.a -o t; then
- :
- else
- echo "FAIL: unable to compile $i"
- exitv=1
- fi
-done
-
-exit $exitv
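
The heart of chk.code is the sed pipeline that turns the m4-marked samples into compilable C: m4_include(x) becomes #include <x>, the m4 macro calls and quoting brackets are stripped, and the __GT__/__LT__/__LB__/__RB__ placeholders are mapped back to the characters m4 would otherwise eat. A rough Tcl equivalent of the two mechanical rewrites (the macro stripping proper is omitted; proc name is illustrative):

    proc m4_to_c { text } {
        # m4_include(db.h) -> #include <db.h>
        regsub -all {m4_include\((.*?)\)} $text {#include <\1>} text
        # Undo the placeholders used for characters m4 treats specially.
        return [string map {__GT__ > __LB__ [ __LT__ < __RB__ ]} $text]
    }
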
diff --git a/storage/bdb/test/scr002/chk.def b/storage/bdb/test/scr002/chk.def
deleted file mode 100644
index 7d5e6670f63..00000000000
--- a/storage/bdb/test/scr002/chk.def
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.def,v 1.9 2002/03/27 04:32:57 bostic Exp $
-#
-# Check to make sure we haven't forgotten to add any interfaces
-# to the Win32 libdb.def file.
-
-d=../..
-
-# Test must be run from the top-level directory, not from a test directory.
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-f=$d/build_win32/libdb.def
-t1=__1
-t2=__2
-
-exitv=0
-
-sed '/; /d' $f |
- egrep @ |
- awk '{print $1}' |
- sed -e '/db_xa_switch/d' \
- -e '/^__/d' -e '/^;/d' |
- sort > $t1
-
-egrep __P $d/dbinc_auto/ext_prot.in |
- sed '/^[a-z]/!d' |
- awk '{print $2}' |
- sed 's/^\*//' |
- sed '/^__/d' | sort > $t2
-
-if cmp -s $t1 $t2 ; then
- :
-else
- echo "<<< libdb.def >>> DB include files"
- diff $t1 $t2
- echo "FAIL: missing items in libdb.def file."
- exitv=1
-fi
-
-# Check to make sure we don't have any extras in the libdb.def file.
-sed '/; /d' $f |
- egrep @ |
- awk '{print $1}' |
- sed -e '/__db_global_values/d' > $t1
-
-for i in `cat $t1`; do
- if egrep $i $d/*/*.c > /dev/null; then
- :
- else
- echo "$f: $i not found in DB sources"
- fi
-done > $t2
-
-test -s $t2 && {
- cat $t2
- echo "FAIL: found unnecessary items in libdb.def file."
- exitv=1
-}
-
-exit $exitv
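
Both halves of chk.def reduce to the same operation: extract a sorted symbol list from each side, then report the asymmetric differences. The same comparison in Tcl, assuming hypothetical pre-extracted list files:

    proc read_symbols { path } {
        set fid [open $path r]
        set syms [lsort -unique [split [string trim [read $fid]] \n]]
        close $fid
        return $syms
    }

    set def   [read_symbols libdb.def.syms]
    set proto [read_symbols ext_prot.syms]

    foreach s $proto {
        if { [lsearch -exact -sorted $def $s] < 0 } {
            puts "missing from libdb.def: $s"
        }
    }
    foreach s $def {
        if { [lsearch -exact -sorted $proto $s] < 0 } {
            puts "$s not found in DB sources"
        }
    }
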
diff --git a/storage/bdb/test/scr003/chk.define b/storage/bdb/test/scr003/chk.define
deleted file mode 100644
index 96c45c2b7d7..00000000000
--- a/storage/bdb/test/scr003/chk.define
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.define,v 1.25 2004/10/07 18:54:37 bostic Exp $
-#
-# Check to make sure that all #defines are actually used.
-# Check to make sure that all #defines start in column 1.
-
-d=../..
-
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-exitv=0
-t1=__1
-t2=__2
-t3=__3
-
-find $d -name '*.c' -o -name '*.cpp' |
- sed -e '/\/php_db4\//d' |
- xargs egrep '^[ ][ ]*#' > $t1
-test -s $t1 && {
- echo "FAIL: found #defines with leading white space:"
- cat $t1
- exit 1
-}
-
-egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in |
- sed -e '/db_185.in/d' -e '/queue.h/d' -e '/xa.h/d' |
- awk '{print $2}' |
- sed -e '/^B_DELETE/d' \
- -e '/^B_MAX/d' \
- -e '/^DB_BTREEOLDVER/d' \
- -e '/^DB_HASHOLDVER/d' \
- -e '/^DB_LOCKVERSION/d' \
- -e '/^DB_MAX_PAGES/d' \
- -e '/^DB_QAMOLDVER/d' \
- -e '/^DB_TXNVERSION/d' \
- -e '/^DB_UNUSED/d' \
- -e '/^DEFINE_DB_CLASS/d' \
- -e '/^HASH_UNUSED/d' \
- -e '/^HPUX_MUTEX_PAD/d' \
- -e '/^LOG_OP/d' \
- -e '/^MINFILL/d' \
- -e '/^MUTEX_FIELDS/d' \
- -e '/^NAME_TO_SEQUENCE/d' \
- -e '/^NCACHED2X/d' \
- -e '/^NCACHED30/d' \
- -e '/^PAIR_MASK/d' \
- -e '/^P_16_COPY/d' \
- -e '/^P_32_COPY/d' \
- -e '/^P_32_SWAP/d' \
- -e '/^P_TO_UINT16/d' \
- -e '/^QPAGE_CHKSUM/d' \
- -e '/^QPAGE_NORMAL/d' \
- -e '/^QPAGE_SEC/d' \
- -e '/^SIZEOF_PAGE/d' \
- -e '/^TAILQ_/d' \
- -e '/^UINT64_FMT/d' \
- -e '/^UINT64_MAX/d' \
- -e '/^WRAPPED_CLASS/d' \
- -e '/^__BIT_TYPES_DEFINED__/d' \
- -e '/^__DBC_INTERNAL/d' \
- -e '/^i_/d' \
- -e '/_H_/d' \
- -e 's/(.*//' | sort > $t1
-
-find $d -name '*.c' -o -name '*.cpp' > $t2
-for i in `cat $t1`; do
- if egrep -w $i `cat $t2` > /dev/null; then
- :;
- else
- f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in |
- sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "`
- echo "FAIL: $i: $f"
- fi
-done | sort -k 2 > $t3
-
-test -s $t3 && {
- cat $t3
- echo "FAIL: found unused #defines"
- exit 1
-}
-
-exit $exitv
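
Behind the long exception list, chk.define's algorithm is simple: harvest every macro name #defined in the public headers, then flag any name that never occurs as a whole word in a .c or .cpp file. The same scan as a Tcl sketch (callers supply the name and file lists; names are illustrative):

    proc unused_defines { defines sources } {
        set text ""
        foreach f $sources {
            set fid [open $f r]
            append text [read $fid] \n
            close $fid
        }
        set unused {}
        foreach name $defines {
            # \m and \M are Tcl's word boundaries, like egrep -w.
            if { ![regexp "\\m${name}\\M" $text] } {
                lappend unused $name
            }
        }
        return $unused
    }
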
diff --git a/storage/bdb/test/scr004/chk.javafiles b/storage/bdb/test/scr004/chk.javafiles
deleted file mode 100644
index 98fb1aff23f..00000000000
--- a/storage/bdb/test/scr004/chk.javafiles
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.javafiles,v 1.7 2003/08/01 18:14:45 gburd Exp $
-#
-# Check to make sure we haven't forgotten to add any Java files to the list
-# of source files in the Makefile.
-
-d=../..
-
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-f=$d/dist/Makefile.in
-
-t1=__1
-t2=__2
-
-find $d/java $d/examples_java $d/rpc_server/java -name \*.java -print |
- sed -e 's/^.*\///' | sort -u > $t1
-tr ' \t' '\n' < $f | sed -e '/\.java$/!d' -e 's/^.*\///' | sort -u > $t2
-
-cmp $t1 $t2 > /dev/null || {
- echo "<<< java source files >>> Makefile"
- diff $t1 $t2
- exit 1
-}
-
-exit 0
diff --git a/storage/bdb/test/scr005/chk.nl b/storage/bdb/test/scr005/chk.nl
deleted file mode 100644
index c6ccaab6920..00000000000
--- a/storage/bdb/test/scr005/chk.nl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.nl,v 1.7 2003/09/30 19:16:42 bostic Exp $
-#
-# Check to make sure that there are no trailing newlines in __db_err calls.
-
-d=../..
-
-[ -f $d/README ] || {
- echo "FAIL: chk.nl can't find the source directory."
- exit 1
-}
-
-cat << END_OF_CODE > t.c
-#include <sys/types.h>
-
-#include <errno.h>
-#include <stdio.h>
-
-int chk(FILE *, char *);
-
-int
-main(argc, argv)
- int argc;
- char *argv[];
-{
- FILE *fp;
- int exitv;
-
- for (exitv = 0; *++argv != NULL;) {
- if ((fp = fopen(*argv, "r")) == NULL) {
- fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
- return (1);
- }
- if (chk(fp, *argv))
- exitv = 1;
- (void)fclose(fp);
- }
- return (exitv);
-}
-
-int
-chk(fp, name)
- FILE *fp;
- char *name;
-{
-	int ch, exitv, line;
-
- exitv = 0;
- for (ch = 'a', line = 1;;) {
- if ((ch = getc(fp)) == EOF)
- return (exitv);
- if (ch == '\n') {
- ++line;
- continue;
- }
- if (!isspace(ch)) continue;
- if ((ch = getc(fp)) != '_') continue;
- if ((ch = getc(fp)) != '_') continue;
- if ((ch = getc(fp)) != 'd') continue;
- if ((ch = getc(fp)) != 'b') continue;
- if ((ch = getc(fp)) != '_') continue;
- if ((ch = getc(fp)) != 'e') continue;
- if ((ch = getc(fp)) != 'r') continue;
- if ((ch = getc(fp)) != 'r') continue;
- if ((ch = getc(fp)) != '(') continue;
- while ((ch = getc(fp)) != '"') {
- if (ch == EOF)
- return (exitv);
- if (ch == '\n')
- ++line;
- }
- while ((ch = getc(fp)) != '"')
- switch (ch) {
- case EOF:
- return (exitv);
- case '\\n':
- ++line;
- break;
- case '.':
- if ((ch = getc(fp)) != '"')
- ungetc(ch, fp);
- else {
- fprintf(stderr,
- "%s: <period> at line %d\n", name, line);
- exitv = 1;
- }
- break;
- case '\\\\':
- if ((ch = getc(fp)) != 'n')
- ungetc(ch, fp);
- else if ((ch = getc(fp)) != '"')
- ungetc(ch, fp);
- else {
- fprintf(stderr,
- "%s: <newline> at line %d\n", name, line);
- exitv = 1;
- }
- break;
- }
- }
- return (exitv);
-}
-END_OF_CODE
-
-cc t.c -o t
-if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
- :
-else
- echo "FAIL: found __db_err calls ending with periods/newlines."
- exit 1
-fi
-
-exit 0
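
chk.nl generates and compiles a small C scanner, but the invariant it enforces, that no __db_err format string may end in a period or \n because the error framework supplies its own terminator, fits in a couple of Tcl regexps. A sketch that checks one file (proc name illustrative; the line-number reporting of the C version is omitted):

    proc check_db_err_strings { path } {
        set fid [open $path r]
        set src [read $fid]
        close $fid
        set bad 0
        # Pull out the first string literal of each __db_err call.
        foreach {full msg} \
            [regexp -all -inline {__db_err\([^"]*"([^"]*)"} $src] {
            if { [string match {*\\n} $msg] ||
                 [string index $msg end] eq "." } {
                puts "$path: questionable message: $msg"
                set bad 1
            }
        }
        return $bad
    }
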
diff --git a/storage/bdb/test/scr006/chk.offt b/storage/bdb/test/scr006/chk.offt
deleted file mode 100644
index 69bf7cc9219..00000000000
--- a/storage/bdb/test/scr006/chk.offt
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.offt,v 1.13 2004/10/07 18:57:53 bostic Exp $
-#
-# Make sure that no off_t's have snuck into the release.
-
-d=../..
-
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-t=__1
-
-egrep -w off_t $d/*/*.[ch] $d/*/*.in |
-sed -e "/#undef off_t/d" \
- -e "/db_env_set_func_ftruncate/d" \
- -e "/db_env_set_func_pread/d" \
- -e "/db_env_set_func_pwrite/d" \
- -e "/db_env_set_func_seek/d" \
- -e "/j_ftruncate/d" \
- -e "/j_pread/d" \
- -e "/j_pwrite/d" \
- -e "/j_seek/d" \
- -e "/mp_fopen.c:.*can't use off_t's here/d" \
- -e "/mp_fopen.c:.*size or type off_t's or/d" \
- -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
- -e "/mutex\/tm.c:/d" \
- -e "/os_map.c:.*(off_t)0))/d" \
- -e "/os_method.c.*func_ftruncate/d" \
- -e "/os_method.c.*func_pread/d" \
- -e "/os_method.c.*func_pwrite/d" \
- -e "/os_method.c.*func_seek/d" \
- -e "/os_method.c.*__P.*off_t/d" \
- -e "/os_rw.c:/d" \
- -e "/os_seek.c:.*off_t offset;/d" \
- -e "/os_seek.c:.*offset = /d" \
- -e "/os_truncate.c:.*off_t offset;/d" \
- -e "/os_truncate.c:.*off_t stat_offset;/d" \
- -e "/os_truncate.c:.*offset = /d" \
- -e "/test_perf\/perf_misc.c:/d" \
- -e "/test_server\/dbs.c:/d" \
- -e "/test_vxworks\/vx_mutex.c:/d" > $t
-
-test -s $t && {
- cat $t
- echo "FAIL: found questionable off_t usage"
- exit 1
-}
-
-exit 0
diff --git a/storage/bdb/test/scr007/chk.proto b/storage/bdb/test/scr007/chk.proto
deleted file mode 100644
index 05e980ce6fc..00000000000
--- a/storage/bdb/test/scr007/chk.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.proto,v 1.10 2003/12/08 19:28:26 bostic Exp $
-#
-# Check to make sure that prototypes are actually needed.
-
-d=../..
-
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-t1=__1
-t2=__2
-t3=__3
-
-egrep '__P' $d/dbinc_auto/*.h |
- sed -e 's/[ ][ ]*__P.*//' \
- -e 's/^.*[ *]//' \
- -e '/__db_cprint/d' \
- -e '/__db_lprint/d' \
- -e '/__db_noop_log/d' \
- -e '/__db_prnpage/d' \
- -e '/__db_txnlist_print/d' \
- -e '/__db_util_arg/d' \
- -e '/__ham_func2/d' \
- -e '/__ham_func3/d' \
- -e '/_print$/d' \
- -e '/_read$/d' > $t1
-
-find $d -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2
-for i in `cat $t1`; do
- c=$(egrep -Hlw $i $(cat $t2) | wc -l)
- echo "$i: $c"
-done | egrep ' 1$' > $t3
-
-test -s $t3 && {
- cat $t3
- echo "FAIL: found unnecessary prototypes."
- exit 1
-}
-
-exit 0
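
chk.proto's rule of thumb: a function that appears in exactly one source file needs no shared prototype, since only its defining file ever references it. The same counting in Tcl, where contents is assumed to be a dict mapping file path to file text:

    proc unneeded_protos { names contents } {
        set result {}
        foreach name $names {
            set hits 0
            dict for {path text} $contents {
                if { [regexp "\\m${name}\\M" $text] } { incr hits }
            }
            # Seen in one file only: the prototype serves no caller.
            if { $hits == 1 } { lappend result $name }
        }
        return $result
    }
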
diff --git a/storage/bdb/test/scr008/chk.pubdef b/storage/bdb/test/scr008/chk.pubdef
deleted file mode 100644
index f5e66ccbc69..00000000000
--- a/storage/bdb/test/scr008/chk.pubdef
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/bin/sh -
-#
-# Reconcile the list of public defines with the man pages and the Java files.
-
-d=../..
-docs=$d/../db_docs
-
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-p=$d/dist/pubdef.in
-
-exitv=0
-
-cat <<END_OF_TEXT
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Check that pubdef.in has everything listed in m4.links.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-END_OF_TEXT
-
-f=$docs/m4/m4.links
-sed -n \
- -e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
- -e d < $f |
-while read name; do
- if `egrep -w "$name" $p > /dev/null`; then
- :
- else
- echo "$f: $name is missing from $p"
- exitv=1
- fi
-done
-
-cat <<END_OF_TEXT
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Check that m4.links has everything listed in pubdef.in.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-END_OF_TEXT
-
-f=$docs/m4/m4.links
-sed '/^#/d' $p |
-while read name isdoc isinc isjava; do
- if `egrep -w "^.1, $name" $f > /dev/null`; then
- [ "X$isdoc" != "XD" ] && {
- echo "$name should not appear in $f"
- exitv=1
- }
- else
- [ "X$isdoc" = "XD" ] && {
- echo "$name does not appear in $f"
- exitv=1;
- }
- fi
-done
-
-cat <<END_OF_TEXT
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Check that pubdef.in has everything listed in db.in.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-END_OF_TEXT
-
-f=$d/dbinc/db.in
-sed -n \
- -e 's/^#define[ ]*\(DB_[A-Z_0-9][A-Z_0-9]*\).*/\1/p' \
- -e 's/^[ ]*\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
- -e d < $f |
-while read name; do
- if `egrep -w "$name" $p > /dev/null`; then
- :
- else
- echo "$f: $name is missing from $p"
- exitv=1
- fi
-done
-
-cat <<END_OF_TEXT
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Check that db.in has everything listed in pubdef.in.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-END_OF_TEXT
-
-f=$d/dbinc/db.in
-sed '/^#/d' $p |
-while read name isdoc isinc isjava; do
- if `egrep -w "#define[ ]$name|[ ][ ]*$name=[0-9][0-9]*" \
- $f > /dev/null`; then
- [ "X$isinc" != "XI" ] && {
- echo "$name should not appear in $f"
- exitv=1
- }
- else
- [ "X$isinc" = "XI" ] && {
- echo "$name does not appear in $f"
- exitv=1
- }
- fi
-done
-
-cat <<END_OF_TEXT
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Check that pubdef.in has everything listed in DbConstants.java.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-END_OF_TEXT
-
-j=$d/java/src/com/sleepycat/db
-f=$j/internal/DbConstants.java
-sed -n -e 's/.*int[ ]\([^ ]*\).*;/\1/p' < $f |
-while read name; do
- if `egrep -w "$name" $p > /dev/null`; then
- :
- else
- echo "$f: $name is missing from $p"
- exitv=1
- fi
-done
-
-cat <<END_OF_TEXT
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Check that DbConstants.java has everything listed in pubdef.in.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-END_OF_TEXT
-
-f=$j/internal/DbConstants.java
-sed '/^#/d' $p |
-while read name isdoc isinc isjava; do
- if `egrep -w "int[ ]$name =" $f > /dev/null`; then
- [ "X$isjava" != "XJ" ] && {
- echo "$name should not appear in $f"
- exitv=1
- }
- else
- [ "X$isjava" = "XJ" ] && {
- echo "$name does not appear in $f"
- exitv=1
- }
- fi
-done
-
-cat <<END_OF_TEXT
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Check that all constants in pubdef.in are wrapped by the Java API.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-END_OF_TEXT
-
-sed '/^#/d' $p |
-while read name isdoc isinc isjava; do
- if `egrep -w "$name" $j/*.java $d/rpc_server/java/*.java \
- $j/internal/Db.java $j/internal/DbEnv.java \
- $j/internal/db_javaJNI.java > /dev/null`; then
- [ "X$isjava" != "XJ" ] && {
- echo "$name should not appear in the Java API"
- exitv=1
- }
- else
- [ "X$isjava" = "XJ" ] && {
- echo "$name does not appear in the Java API"
- exitv=1
- }
- fi
-done
-
-cat <<END_OF_TEXT
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Check that all constants in pubdef.in are wrapped by the Java native layer.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-END_OF_TEXT
-
-sed '/^#/d' $p |
-while read name isdoc isinc isjava; do
- if `egrep -w "$name" $d/libdb_java/db_java_wrap.c > /dev/null`; then
- [ "X$isjava" != "XN" ] && [ "X$isjava" != "XJ" ] && {
- echo "$name should not appear in the Java native layer"
- exitv=1
- }
- else
- [ "X$isjava" = "XN" ] && {
- echo "$name does not appear in the Java native layer"
- exitv=1
- }
- fi
-done
-
-exit $exitv
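
chk.pubdef runs six variations of one check: a name's D/I/J/N flags in pubdef.in must agree with its presence in the docs, db.in, and the Java layers. (One quirk worth knowing: in most shells each `sed | while read` loop runs in a subshell, so the exitv=1 assignments inside them never reach the final exit $exitv.) One direction of the reconciliation, sketched in Tcl with rows assumed to be {name isdoc isinc isjava} lists:

    proc check_doc_flags { pubdef doc_names } {
        set bad 0
        foreach row $pubdef {
            lassign $row name isdoc isinc isjava
            set present [expr {[lsearch -exact $doc_names $name] >= 0}]
            if { $present && $isdoc ne "D" } {
                puts "$name should not appear in the docs"
                set bad 1
            } elseif { !$present && $isdoc eq "D" } {
                puts "$name does not appear in the docs"
                set bad 1
            }
        }
        return $bad
    }
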
diff --git a/storage/bdb/test/scr009/chk.srcfiles b/storage/bdb/test/scr009/chk.srcfiles
deleted file mode 100644
index 18fc6369288..00000000000
--- a/storage/bdb/test/scr009/chk.srcfiles
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.srcfiles,v 1.13 2004/10/07 19:16:43 bostic Exp $
-#
-# Check to make sure we haven't forgotten to add any files to the list
-# of source files Win32 uses to build its dsp files.
-
-d=../..
-
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-f=$d/dist/srcfiles.in
-t1=__1
-t2=__2
-
-sed -e '/^[ #]/d' \
- -e '/^db_server_clnt.c/d' \
- -e '/^db_server_svc.c/d' \
- -e '/^db_server_xdr.c/d' \
- -e '/^gen_db_server.c/d' \
- -e '/^$/d' < $f |
- awk '{print $1}' > $t1
-find $d -type f |
- sed -e 's/^\.\.\/\.\.\///' \
- -e '/^build[^_]/d' \
- -e '/^libdb_java\/java_stat_auto.c/d' \
- -e '/^mod_db4\//d' \
- -e '/^perl\//d' \
- -e '/^php_db4\//d' \
- -e '/^rpc_server\/c\/gen_db_server.c/d' \
- -e '/^test\//d' \
- -e '/^test_server/d' \
- -e '/^test_thread/d' \
- -e '/^test_vxworks/d' |
- egrep '\.c$|\.cpp$|\.def$|\.rc$' |
- sort > $t2
-
-cmp $t1 $t2 > /dev/null || {
- echo "<<< srcfiles.in >>> existing files"
- diff $t1 $t2
- exit 1
-}
-
-exit 0
diff --git a/storage/bdb/test/scr010/chk.str b/storage/bdb/test/scr010/chk.str
deleted file mode 100644
index 873fdb0124a..00000000000
--- a/storage/bdb/test/scr010/chk.str
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.str,v 1.9 2004/09/22 18:01:06 bostic Exp $
-#
-# Check spelling in quoted strings.
-
-d=../..
-
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-t1=__t1
-
-sed -e '/^#include/d' \
- -e '/"/!d' \
- -e 's/^[^"]*//' \
- -e 's/%s/ /g' \
- -e 's/[^"]*$//' \
- -e 's/\\[nt]/ /g' \
- `find $d -name '*.[ch]' -o -name '*.cpp' -o -name '*.java' |
- sed '/\/perl\//d'` |
-spell | sort | comm -23 /dev/stdin spell.ok > $t1
-
-test -s $t1 && {
- cat $t1
- echo "FAIL: found questionable spelling in strings."
- exit 1
-}
-
-egrep -h '/\* | \* ' \
- `find $d -name '*.[ch]' -o -name '*.cpp' | sed '/\/perl\//d'` |
-spell | sort | comm -23 /dev/stdin spell.ok > $t1
-
-test -s $t1 && {
- cat $t1
- echo "FAIL: found questionable spelling in comments."
- exit 1
-}
-
-exit 0
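
chk.str's pipeline comes down to: extract the quoted strings, run spell over them, and discard anything listed in spell.ok; whatever survives is reported. The whitelist step is a plain set difference, sketched here in Tcl (spell(1) has no Tcl stand-in, so the flagged words arrive as input; names are illustrative):

    proc filter_known_words { suspects okfile } {
        set fid [open $okfile r]
        set ok [lsort [split [string trim [read $fid]] \n]]
        close $fid
        set unknown {}
        foreach w [lsort -unique $suspects] {
            if { [lsearch -exact -sorted $ok $w] < 0 } {
                lappend unknown $w
            }
        }
        return $unknown
    }
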
diff --git a/storage/bdb/test/scr010/spell.ok b/storage/bdb/test/scr010/spell.ok
deleted file mode 100644
index 6a557279b67..00000000000
--- a/storage/bdb/test/scr010/spell.ok
+++ /dev/null
@@ -1,3697 +0,0 @@
-AAAA
-ABS
-ADDR
-AES
-AIX's
-AJVX
-ALG
-ALLBACK
-ALLDB
-ALLOC
-ALLZEROES
-API
-APIs
-APP
-APPMALLOC
-APPNAME
-APPREC
-ASYNC
-ATOI
-AUTOCOMMIT
-AUTOREMOVE
-AccessExample
-AccesssExample
-Acflmo
-Acknowledgements
-Aclmop
-Aclop
-Adata
-Addr
-Ahlm
-Ahm
-Antoon
-Applock
-ArithmeticException
-Arntzen
-ArrayList
-AssociateCallbacks
-AttachCurrentThread
-Aug
-BBBB
-BC
-BCFILprRsvVxX
-BCc
-BDBXXXXX
-BDBXXXXXX
-BEGID
-BH
-BH's
-BI
-BII
-BINTERNAL
-BITSPERBLOCK
-BKEYDATA
-BLKSIZE
-BNF
-BOTHC
-BOVERFLOW
-BR
-BSIZE
-BTCOMPARE
-BTMETA
-BTREE
-BTREEMAGIC
-BTREEMETA
-BTREEOLDVER
-BTREEVERSION
-BUF
-Backoff
-Barreto
-Bc
-Bdata
-BerkeleyDB
-BigInteger
-BindingSpeedTest
-Bosselaers
-BtRecExample
-Btree
-BtreeStat
-BtreeStats
-Btrees
-BulkAccessExample
-ByteArray
-ByteArrayBinding
-ByteArrayFormat
-ByteArrayInputStream
-ByteArrayOutputStream
-C'est
-CALLBACK
-CALLPGIN
-CBC
-CC
-CCCC
-CCYYMMDDhhmm
-CD
-CDB
-CDS
-CDdFILTVvX
-CFB
-CFILpRsv
-CFLprsvVxX
-CFh
-CHARKEY
-CHGPG
-CHILDCOMMIT
-CHILDINFO
-CHILDINFOs
-CHK
-CHKPNT
-CHKPOINT
-CHKSUM
-CKP
-CKPLSN
-CL
-CLASSPATH
-CLOSEFP
-CLR
-CLRDBC
-CLpsvxX
-CMP
-CNT
-COMPQUIET
-CONCAT
-CONCATDATAKEY
-CONCATKEYDATA
-CONF
-CONFIG
-CONST
-CRTL
-CRYPTO
-CT
-CTX
-CXX
-CacheFilePriority
-CacheFileStats
-CacheStats
-Callback
-CdFILTvX
-Ch
-ClassCastException
-ClassCatalog
-ClassCatalogDB
-ClassInfo
-ClassNotFoundException
-ClientData
-CloseHandle
-Cmd
-Cmp
-CollectionTest
-Config
-CopyObjBytes
-CreateFile
-CreateFileMapping
-CreateHashEntry
-Crypto
-CurrentTransaction
-Cygwin
-DATAHOME
-DBC
-DBCursor
-DBENV
-DBHOME
-DBINFO
-DBLOCAL
-DBLOG
-DBM
-DBMETA
-DBMETASIZE
-DBNAME
-DBP
-DBP's
-DBREG
-DBREP
-DBS
-DBSDIR
-DBT
-DBT's
-DBTCL
-DBTList
-DBTYPE
-DBTs
-DBa
-DBaa
-DBaz
-DBba
-DBcursor
-DBz
-DEADFILE
-DECLS
-DEF
-DEFMINKEYPAGE
-DELNO
-DGREE
-DIR
-DIRECTIO
-DIRENT
-DIST
-DJ
-DLFCN
-DLL
-DOALL
-DONOTINDEX
-DS
-DST
-DSYNC
-DUP
-DUPFIRST
-DUPID
-DUPLAST
-DUPMASTER
-DUPOK
-DUPONLY
-DUPS
-DUPSET
-DUPSORT
-DataBinding
-DataBuffer
-DataCursor
-DataDb
-DataEnvironment
-DataFormat
-DataIndex
-DataInput
-DataInputStream
-DataOutput
-DataOutputStream
-DataStore
-DataThang
-DataType
-DataView
-DatabaseEntry
-DatabaseException
-DatabaseType
-Db
-DbAppDispatch
-DbAppendRecno
-DbAssociate
-DbAttachImpl
-DbBtreeCompare
-DbBtreePrefix
-DbBtreeStat
-DbClient
-DbCount
-DbDeadlockException
-DbDelete
-DbDispatcher
-DbDupCompare
-DbEnv
-DbEnvFeedback
-DbEnvFeedbackHandler
-DbErrcall
-DbErrorHandler
-DbException
-DbFeedback
-DbFeedbackHandler
-DbGet
-DbGetFlags
-DbGetOpenFlags
-DbGetjoin
-DbHash
-DbHashStat
-DbInfoDelete
-DbKeyRange
-DbLock
-DbLockNotGrantedException
-DbLockRequest
-DbLockStat
-DbLogStat
-DbLogc
-DbLsn
-DbMemoryException
-DbMpoolFStat
-DbMpoolFile
-DbMpoolFileStat
-DbMpoolStat
-DbMultiple
-DbMultipleDataIterator
-DbMultipleIterator
-DbMultipleKeyDataIterator
-DbMultipleRecnoDataIterator
-DbOpen
-DbOutputStreamErrcall
-DbPanicHandler
-DbPreplist
-DbPut
-DbQueueStat
-DbRecoveryInit
-DbRemove
-DbRename
-DbRepStat
-DbRepTransport
-DbRunRecoveryException
-DbSecondaryKeyCreate
-DbSequence
-DbServer
-DbStat
-DbTestUtil
-DbTruncate
-DbTxn
-DbTxnRecover
-DbTxnStat
-DbUpgrade
-DbUtil
-DbVerify
-DbXA
-DbXAResource
-DbXid
-Dbc
-DbcDup
-DbcGet
-DbcPut
-Dbm
-DbmCommand
-Dbp
-Dbs
-Dbt
-Dbt's
-Dbts
-Dde
-Deadfile
-DeadlockException
-Debian
-DeleteInfo
-Deref'ing
-Dir
-Dups
-EAGAIN
-EBUSY
-ECB
-EEXIST
-EEXISTS
-EFAULT
-EGENCHG
-EID
-EINTR
-EINVAL
-EIO
-EIRT
-EIi
-ELECTINIT
-ELECTVOTE
-EMSG
-ENOENT
-ENOMEM
-ENT
-ENV
-ENV's
-EOFException
-EPG
-EPGNO
-EPHASE
-EPRINT
-EPRINTed
-ETIME
-ETIMEDOUT
-EXCL
-EXT
-Eefh
-Egen
-Elp
-Endian
-EntityBinding
-EnvAttr
-EnvExample
-EnvGetEncryptFlags
-EnvInfoDelete
-EnvOpen
-EnvRemove
-EnvSetErrfile
-EnvSetErrpfx
-EnvSetFlags
-EnvTest
-EnvVerbose
-Equidistributed
-Errcall
-Errfile
-ErrorFunc
-ErrorSetup
-Errpfx
-EvalObjv
-ExampleDatabaseLoad
-ExampleDatabaseRead
-ExceptionUnwrapper
-ExceptionWrapper
-ExceptionWrapperTest
-Exp
-Externalizable
-FALLTHROUGH
-FCNTL
-FCONTROL
-FD
-FDATASYNC
-FF
-FH
-FILEDONE
-FILEID
-FILELIST
-FILENO
-FILEOPEN
-FILEWRITTEN
-FIXEDLEN
-FIXLEN
-FIXME
-FMAP
-FMT
-FN
-FNAME
-FOREACH
-FP
-FST
-FSTAT
-FSTATI
-FTRUNCATE
-FTYPE
-FastInputStream
-FastOutputStream
-FatalRecover
-Fd
-Ff
-Fh
-FileIndexHigh
-FileIndexLow
-FileNotFoundException
-Fileinfo
-FindHashEntry
-FooImp
-Foreach
-ForeignKeyIndex
-ForeignKeyTest
-FreeBSD
-FreeBSD's
-FreeFunc
-FreeList
-Friedl
-GCC
-GETALL
-GETCWD
-GETDYNAMIC
-GETNAME
-GETOPT
-GETRUSAGE
-GETTIME
-GETTIMEOFDAY
-GETUID
-GETZIP
-Gb
-Gcc
-Gentles
-Get's
-GetByteArray
-GetByteArrayFromObj
-GetDiskFreeSpace
-GetFileInformationByHandle
-GetFlags
-GetFlagsList
-GetGlobPrefix
-GetHashValue
-GetIndexFromObj
-GetIntFromObj
-GetJavaVM
-GetJoin
-GetLockDetect
-GetLongFromObj
-GetLsn
-GetOpenFlag
-GetTimeout
-GetUInt
-GetUnsignedIntFromObj
-GetVerbose
-GetVersion
-Gh
-GlobalRefs
-GotRange
-HANDSOFF
-HASHC
-HASHC's
-HASHHDR
-HASHINSERT
-HASHLOOKUP
-HASHMAGIC
-HASHMETA
-HASHOLDVER
-HASHREMOVE
-HASHTAB
-HASHVERSION
-HCommand
-HDR
-HDRs
-HEURCOM
-HEURHAZ
-HEURMIX
-HEURRB
-HKEYDATA
-HMAC
-HMETA
-HOFFDUP
-HOFFPAGE
-HOFFSET
-HOLDELECTION
-HPPA
-HPUX
-HSEARCH
-Harbison
-HashStats
-Hashtable
-HelloDatabaseWorld
-Holder's
-Hsearch
-IA
-IAFTER
-IBEFORE
-IBTREE
-ICURRENT
-IDLETIMEOUT
-IDs
-IIL
-IL
-ILOCK
-ILo
-ILprR
-INDX
-INFOTYPE
-INI
-INIT
-INITED
-INITENV
-INITSPIN
-INMEM
-INMEMORY
-INORDER
-INTTYPES
-INVAL
-INVALIDID
-IOException
-IOExceptionWrapper
-IOSIZE
-IPC
-IR
-IREAD
-IRECNO
-IRGRP
-IRIX
-IROTH
-IRUSR
-ISDUP
-ISPERM
-ISSET
-IV's
-IW
-IWGRP
-IWOTH
-IWR
-IWRITE
-IWUSR
-Ick
-Ids
-Ik
-IllegalArgumentException
-IllegalStateException
-IncrRefCount
-IndexOutOfBoundsException
-Init
-Initialise
-IntegrityConstraintException
-Interp
-InventoryDB
-Istmp
-ItemNameIndexDB
-Itemname
-IterDeadlockTest
-JDB
-JE
-JHB
-JKL
-JNI
-JNIEnv
-JNIs
-JOINCUR
-JOINENV
-JVM
-JZ
-JavaIO
-JavaRPCServer
-JoinTest
-KEYDATA
-KEYEMPTY
-KEYEXIST
-KEYFIRST
-KEYGROUP
-KEYGRP
-KEYLAST
-KEYLEN
-KL
-Kerberos
-KeyExtractor
-KeyRange
-KeyRangeException
-KeyRangeTest
-Krinsky
-LANGLVL
-LASTCKP
-LBTREE
-LCK
-LDF
-LDUP
-LEAFCHAIN
-LEAFLEVEL
-LEAFSEEN
-LFNAME
-LFPREFIX
-LG
-LGPL
-LIBNSL
-LL
-LOCKDOWN
-LOCKOBJ
-LOCKREGION
-LOCKREQ
-LOCKTAB
-LOCKTIMEOUT
-LOCKVERSION
-LOGC
-LOGFILEID
-LOGMAGIC
-LOGOLDVER
-LOGP
-LOGREADY
-LOGSONLY
-LOGVERSION
-LORDER
-LRECNO
-LRECNODUP
-LRU
-LRUness
-LSN
-LSN's
-LSNfile
-LSNoffset
-LSNs
-LSTAT
-LV
-LWARX
-LWP
-LWZ
-Landon
-Lastp
-Lcom
-ListIterator
-ListObjAppendElement
-Ljava
-Ll
-LocalIterator
-LockDetect
-LockDetectMode
-LockExample
-LockGet
-LockMode
-LockNotGrantedException
-LockOperation
-LockRequest
-LockRequestMode
-LockStat
-LockStats
-LockTimeout
-LockVec
-Lockfhp
-LogArchive
-LogCompare
-LogFile
-LogFlush
-LogGet
-LogPut
-LogRegister
-LogSequenceNumber
-LogStat
-LogStats
-Logc
-LogcGet
-LpRsS
-LprRsS
-Lsn
-LtoR
-MALLOC
-MAMAMIA
-MARGO
-MASTERELECT
-MAXARGS
-MAXBQUALSIZE
-MAXBTREELEVEL
-MAXFIELD
-MAXGTRIDSIZE
-MAXID
-MAXINFOSIZE
-MAXLOCKS
-MAXMMAPSIZE
-MAXNR
-MAXPATHLEN
-MAXSIZE
-MAXSIZEONPAGE
-MAXTIMEOUT
-MAXWRITE
-MC
-MEM
-MEMCMP
-MEMCPY
-MEMMAPPED
-MEMMOVE
-MEMP
-METADIRTY
-MFT
-MINCACHE
-MINFO
-MINIT
-MINLOCKS
-MINPAGECACHE
-MINWRITE
-MKS
-MLOCK
-MMAP
-MMDDhhmm
-MNO
-MP
-MPE
-MPFARRAY
-MPOOL
-MPOOLFILE
-MPOOLFILE's
-MPOOLFILEs
-MPREG
-MPREGs
-MSB
-MSC
-MSEM
-MSG
-MSGBUF
-MSHUTDOWN
-MSTR
-MSVC
-MT
-MUNLOCK
-MUNMAP
-MUTEXes
-MYDIRECTORY
-Makoto
-Malloc
-MapEntry
-MapViewOfFile
-Margo
-MarshalledEnt
-MarshalledEntityBinding
-MarshalledKey
-MarshalledKeyBinding
-MarshalledObject
-MarshalledTupleData
-MarshalledTupleEntry
-MarshalledTupleKeyEntity
-Matsumoto
-Maxid
-Maxkey
-Mb
-Mbytes
-MemoryException
-Mempool
-Mersenne
-Metadata
-Metapage
-MinGW
-Minkey
-Misc
-MixColumn
-MoveFile
-MoveFileEx
-Mp
-MpGet
-MpInfoDelete
-MpStat
-MpSync
-MpTrickle
-Mpool
-MpoolExample
-Mpoolfile
-Msg
-MsgType
-Mutex
-Mv
-MyDbs
-NB
-NBUCKET
-NCACHE
-NCACHED
-NDBM
-NDIR
-NEEDSPLIT
-NEEDSWAP
-NEWCLIENT
-NEWFILE
-NEWMASTER
-NEWSITE
-NG
-NOARCHIVE
-NOAUTO
-NOBUFFER
-NOCOPY
-NODUP
-NODUPDATA
-NODUPS
-NOFILE
-NOHEADER
-NOKEY
-NOLOCK
-NOLOCKING
-NOMIGRATE
-NOMMAP
-NOMORE
-NOORDERCHK
-NOOVERWRITE
-NOPANIC
-NOPROMOTE
-NORUN
-NOSERVER
-NOSORT
-NOSYNC
-NOTA
-NOTEXIST
-NOTFOUND
-NOTGRANTED
-NOTPERM
-NOTREACHED
-NOTSET
-NOTUSED
-NOTYPE
-NOTZERO
-NOWAIT
-NP
-NRECS
-NT
-NTFS
-NULL'ing
-NULLXID
-NULLing
-NULLs
-NULs
-NUM
-NUMWRITES
-NameToInfo
-NameToPtr
-Ndbm
-NdbmOpen
-NewInfo
-NewStringObj
-Nishimura
-NoP
-NoqV
-NqV
-Nr
-NrV
-NsV
-Nuff
-NullClassCatalog
-NullPointerException
-NullTransactionRunner
-Num
-Nxt
-OBJ
-ODDFILESIZE
-OFFDUP
-OFFPAGE
-OLDVERSION
-ONC
-OOB
-OP
-OPD
-OPENFILES
-OPFLAGS
-OR'd
-ORDERCHKONLY
-OSF
-OSO
-OUTFILE
-OVFL
-Obj
-ObjectInputStream
-ObjectOutputStream
-ObjectStreamClass
-Objs
-Offpage
-Ol
-OpenFileMapping
-OpenServer
-OperationStatus
-Ops
-Optimised
-OutOfMemoryError
-OutputStream
-PAGEDONE
-PAGEINFO
-PAGEINFOs
-PAGELIST
-PAGEs
-PANIC'd
-PARAMS
-PARENT's
-PBNYC
-PG
-PGDEF
-PGINFO
-PGNO
-PGSIZE
-PHP
-PID
-PKG
-PLIST
-POPENFILES
-POSTDESTROY
-POSTLOG
-POSTLOGMETA
-POSTOPEN
-POSTSYNC
-PPC
-PR
-PREAD
-PREPLIST
-PREV
-PRI
-PRINTFOOTER
-PRINTHEADER
-PROT
-PSIZE
-PSTAT
-PTHREAD
-PWRITE
-PaRisc
-Pagesize
-Pagesizes
-Params
-Part's
-PartBinding
-PartData
-PartKey
-PartKeyBinding
-PartValue
-Paulo
-Perl
-Pg
-PgInit
-PgIsset
-Pgin
-Pgno
-Phong
-PlatformSDK
-Posix
-PowerPC
-PreparedTransaction
-Prev
-PrimaryKeyAssigner
-Proc
-Pthread
-PtrToInfo
-QAM
-QAMDATA
-QAMMAGIC
-QAMMETA
-QAMOLDVER
-QAMVERSION
-QMETA
-QNX
-QPAGE
-QUOTESERVER
-QueueStats
-RB
-RBBASE
-RBCOMMFAIL
-RBDEADLOCK
-RBEND
-RBINTEGRITY
-RBOTHER
-RBPROTO
-RBROLLBACK
-RBTIMEOUT
-RBTRANSIENT
-RCLOSE
-RDONLY
-RDWRMASTER
-READONLY
-REALLOC
-REALLOC'ed
-REC
-RECLEN
-RECNO
-RECNOSYNC
-RECNUM
-RECORDCOUNT
-RECOVERYTEST
-REGENV
-REGINFO
-REGIONs
-REGMAINT
-RELEN
-RELIANTUNIX
-RENAMEMAGIC
-REPLOCKED
-REPQUOTE
-REPVERSION
-REQ
-REQs
-REVERSECONCAT
-REVERSEDATA
-REVSPLITOFF
-RIJNDAEL
-RINTERNAL
-RIW
-RLOCK
-RM
-RMERR
-RMFAIL
-RMID
-RMNAMESZ
-RMW
-RMs
-ROP
-RPC
-RPCCLIENT
-RPCExample
-RPCGEN
-RPRINT
-RT
-RTTarget
-RUNLOG
-RUNRECOVERY
-RUSAGE
-RandCommand
-RangeExceeded
-RangeKeyNotEqual
-RangeNotFound
-Realloc
-Rec
-Recno
-Recnos
-RecordNumberBinding
-RecordNumberFormat
-RecoveryOperation
-Reinit
-RepElect
-RepElectResult
-RepFlush
-RepLimit
-RepProcessMessage
-RepRequest
-RepStart
-RepStat
-ReplicationHandleDeadException
-ReplicationStats
-ReplicationStatus
-ResetResult
-ReturnSetup
-Rieffel
-Rijmen
-Rijndael
-Roeber
-Rp
-RpcDb
-RpcDbEnv
-RpcDbTxn
-RpcDbc
-RtoL
-RunRecoveryException
-RuntimeException
-RuntimeExceptionWrapper
-Rusage
-SCHED
-SCO
-SCO's
-SEGDATA
-SEGID
-SEM
-SEMA
-SEP
-SEQ
-SERVERPROG
-SERVERVERS
-SETALL
-SETCURSOR
-SETFD
-SETVAL
-SGI
-SHA
-SHALLOC
-SHASH
-SHMEM
-SHMGET
-SHQUEUE
-SIGALRM
-SIGPIPE
-SIZEOF
-SKIPFIRSTKEY
-SKU
-SNPRINTF
-SPL
-SPLITOLD
-SPRINTF
-SS
-SSLeay
-SSZ
-STAILQ
-STARTUPDONE
-STAT
-STATS
-STCWX
-STD
-STDC
-STDERR
-STDINT
-STK
-STR
-STRCASECMP
-STRDUP
-STRLIST
-STROFFSET
-STRTOUL
-STRUCT
-STWCX
-SUBDB
-SWAPBYTES
-SWIG's
-SWITCHes
-SWR
-SYSCONF
-SampleDatabase
-SampleViews
-Schlossnagle
-SecondaryDeadlockTest
-Sedgewick
-Seq
-SeqGet
-SeqGetFlags
-SeqOpen
-SequenceExample
-SequenceStats
-SerialBinding
-SerialBindingTest
-SerialFormat
-SerialInput
-SerialOutput
-SerialSerialBinding
-SerialSerialKeyExtractor
-SetEndOfFile
-SetInfoData
-SetListElem
-SetListElemInt
-SetListElemWideInt
-SetListRecnoElem
-SetMultiList
-SetObjResult
-ShipmentBinding
-ShipmentByPart
-ShipmentBySupplier
-ShipmentData
-ShipmentKey
-ShipmentKeyBinding
-ShipmentValue
-Shm
-Signalling
-SimpleBuffer
-Skiplist
-Skodon
-Sleepycat
-Something's
-SortedMap
-SortedSet
-Sparc
-Splitp
-Stat
-Stats
-Std
-Stdout
-Steele
-StoredClassCatalog
-StoredClassCatalogTest
-StoredClassCatalogTestInit
-StoredCollection
-StoredCollections
-StoredContainer
-StoredEntrySet
-StoredIterator
-StoredKeySet
-StoredList
-StoredMap
-StoredMapEntry
-StoredSortedEntrySet
-StoredSortedKeySet
-StoredSortedMap
-StoredSortedValueSet
-StoredValueSet
-StringBuffer
-StringDbt
-Subdatabase
-Subdb
-Subname
-SunOS
-SupplierBinding
-SupplierByCity
-SupplierData
-SupplierKey
-SupplierKeyBinding
-SupplierValue
-SystemInfo
-TAILQ
-TCHAR
-TCL
-TDS
-TESTDIR
-TESTTESTEST
-TESTXADIR
-THR
-TID
-TLPUT
-TM
-TMASYNC
-TMENDRSCAN
-TMER
-TMERR
-TMFAIL
-TMJOIN
-TMMIGRATE
-TMMULTIPLE
-TMNOFLAGGS
-TMNOFLAGS
-TMNOMIGRATE
-TMNOWAIT
-TMONEPHASE
-TMP
-TMPDIR
-TMREGISTER
-TMRESUME
-TMSTARTRSCAN
-TMSUCCESS
-TMSUSPEND
-TMUSEASYNC
-TMs
-TODO
-TOPLEVEL
-TPC
-TPCB
-TPS
-TRU
-TRUNC
-TRUNCDATA
-TSTRING
-TXN
-TXNAPP
-TXNHEAD
-TXNID
-TXNLIST
-TXNLOGREC
-TXNMGR
-TXNREGION
-TXNS
-TXNVERSION
-TXNs
-Takuji
-Tcl
-Tcl's
-TempFolder
-TestAppendRecno
-TestAssociate
-TestCallback
-TestClassCatalog
-TestClosedDb
-TestConstruct
-TestDataBinding
-TestDbtFlags
-TestEntity
-TestEntityBinding
-TestEnv
-TestGetSetMethods
-TestKeyAssigner
-TestKeyExtractor
-TestKeyRange
-TestLockVec
-TestLogc
-TestOpenEmpty
-TestReplication
-TestRpcServer
-TestSameDbt
-TestSerial
-TestSimpleAccess
-TestStat
-TestStore
-TestTruncate
-TestUtil
-TestXAServlet
-Thang
-Thies
-Threshhold
-Throwable
-TimeUnits
-Tmp
-Topher
-TpcbExample
-TransactionRunner
-TransactionStats
-TransactionTest
-TransactionTests
-TransactionWorker
-Tru
-Tt
-TupleBinding
-TupleBindingTest
-TupleFormat
-TupleFormatTest
-TupleInput
-TupleInputBinding
-TupleMarshalledBinding
-TupleOrderingTest
-TupleOutput
-TupleSerialBinding
-TupleSerialDbFactory
-TupleSerialDbFactoryTest
-TupleSerialEntityBinding
-TupleSerialKeyExtractor
-TupleSerialMarshalledBinding
-TupleSerialMarshalledKeyExtractor
-TupleTupleBinding
-TupleTupleKeyExtractor
-TupleTupleMarshalledBinding
-TupleTupleMarshalledKeyExtractor
-Txn
-TxnCheckpoint
-TxnInfoDelete
-TxnRecover
-TxnStat
-TxnTimeout
-Txnid
-Txns
-UI
-UID
-UINT
-ULONG
-UMRW
-UNAVAIL
-UNDEF
-UNDOC
-UNICODE
-UNISTD
-UNREF
-UOC
-UPDATEROOT
-UPREFIX
-USEC
-USERMEM
-UTF
-UTFDataFormatException
-UTS
-UX
-Unencrypted
-Unicode
-UnixLib
-UnixWare
-Unixware
-UnknownError
-UnmapViewOfFile
-UnsupportedOperationException
-UtfOps
-UtfTest
-Util
-VC
-VER
-VM
-VMPAGESIZE
-VRFY
-VSNPRINTF
-VTALLY
-VX
-Var
-Vc
-VendorDB
-Vo
-Voter's
-Vv
-VvW
-VvXxZ
-Vvw
-Vx
-VxWorks
-WAITSFOR
-WAKEME
-WATCOM
-WLInitialContextFactory
-WORDLIST
-WRITECURSOR
-WRITELOCK
-WRITEOPEN
-WRNOSYNC
-WRONLY
-WT
-WW
-WWRITE
-Waitsfor
-WebLogic
-WinNT
-WriteFile
-X's
-XA
-XAER
-XAException
-XAResource
-XID
-XIDDATASIZE
-XOR'd
-XP
-XPG
-XXX
-Xid
-XxZ
-YIELDCPU
-YY
-YYMMDDhhmm
-ZED
-ZF
-Zero'd
-aa
-aaA
-aaB
-aaC
-aaD
-aaa
-aaaaaa
-aaaaab
-aaaaac
-aab
-aac
-aad
-ab
-abc
-abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq
-abcdef
-abs
-absname
-abspath
-ac
-aca
-accessor
-ack
-acplt
-activekids
-activep
-activepips
-actualData
-actualKey
-acurs
-ada
-addAll
-addfamilylocker
-addpage
-addr
-addrem
-addrp
-adh
-adj
-adjindx
-adlsVv
-admin
-afterop
-ahr
-alfred
-alg
-algsetup
-alignp
-alldb
-alloc
-alloc'ed
-alloc'ing
-alloced
-allocs
-alsVv
-alsn
-amx
-antoon
-anum
-aparts
-apologise
-app's
-appexit
-appl
-appname
-appnotes
-apprec
-apps
-aq
-archivedir
-areDuplicatesAllowed
-areDuplicatesOrdered
-areKeysRenumbered
-arg
-argp
-args
-argv
-arr
-arraycopy
-arraysz
-asites
-assertEquals
-astubble
-ata
-atoi
-atol
-att
-autoCommit
-autocommit
-autoconf
-autoconfig
-autoremove
-avg
-awk
-baaaaa
-backoff
-badend
-badgen
-badkey
-badnum
-bak
-barreto
-baseClass
-bb
-bba
-bcopy
-bcurs
-bd
-bdb
-bdbcmds
-beginInclusive
-beginKey
-beq
-berkdb
-berkdbcmds
-berkeley
-bfname
-bfree
-bh
-bharray
-bhfree
-bhp
-bhwrite
-bi
-bigint
-bigpages
-bigpair
-binding's
-bitmasks
-bk
-blbs
-blk
-blksize
-blockDecrypt
-blockEncrypt
-blocknum
-blocksize
-bmeta
-bmp
-bndx
-bne
-bnum
-bosselaers
-bostic
-bp
-bqual
-br
-broot
-bs
-bshift
-bsize
-bsizep
-bt
-btcompare
-btrec
-btree
-btreemeta
-btrees
-buf
-bufp
-bufs
-bufsize
-buildpartial
-builtin
-bumpSize
-bval
-bylsn
-bypage
-byteLen
-byteOffset
-byteorder
-bytesToChars
-bytesize
-bytesp
-byteswap
-byteswapped
-bytevalue
-cEelmNrtVZ
-caaaaa
-cachep
-cachesize
-cadjust
-callback
-callback's
-callbk
-calloc
-callocs
-callpgin
-carray
-catalogtest
-cbts
-cc
-ccclose
-ccnext
-ccset
-ccurs
-cd
-cdata
-cdb
-cdel
-ceVv
-ceh
-celmNrtVZ
-celmNtV
-celmNtVZ
-cget
-cgetchk
-ch
-char's
-charLen
-charLength
-charOffset
-charkey
-charset
-checkgen
-checklocker
-chgpg
-childcursor
-childinc
-childproof
-childput
-chk
-chkpoint
-chkpt
-chkspace
-chksum
-chmod
-chongo
-ci
-cip
-cipherInit
-cipherInstance
-cipherUpdateRounds
-ckp
-ckplsn
-cksum
-cl
-classCatalog
-classID
-className
-classpath
-cleandir
-clearIndexKey
-clearerr
-clib
-clientData
-clientrun
-clist
-clnt
-closeevent
-closehandle
-closeme
-cls
-cmap
-cmd
-cmdargs
-cmds
-cmp
-cmpwi
-cnt
-com
-comm
-compareDuplicates
-compareproc
-comparitors
-compat
-concatdatakey
-concatkeydata
-conf
-config
-const
-containsAll
-containsKey
-containsValue
-conv
-convprintable
-copyFrom
-copyin
-copyleft
-copymap
-copyout
-copypage
-countElements
-countp
-cp
-cpage
-cpp
-cputchk
-cq
-cr
-crdel
-creat
-createflag
-crypto
-cs
-csp
-ct
-ctp
-ctp's
-ctps
-ctx
-ctxn
-curadj
-curfile
-curinfo
-curinval
-curlist
-curlsn
-curslen
-curslist
-cursp
-cursq
-cutlim
-cxx
-cxxproc
-cxxthis
-cxxutil
-dat
-dataBinding
-dataInput
-dataOutput
-dataToObject
-databuf
-datalen
-datap
-datapage
-datastr
-db
-dbFileName
-dbOpenFlags
-dbc
-dbc's
-dbca
-dbcb
-dbcl
-dbclear
-dbclient
-dbclose
-dbcmds
-dbcp
-dbcursor
-dbdata
-dbdel
-dbdemo
-dbdp
-dbe
-dbentry
-dbenv
-dbh
-dbinc
-dbinfo
-dbinit
-dbip
-dbj
-dbjoin
-dbkill
-dblist
-dblistlinks
-dblp
-dbm
-dbmclose
-dbmeta
-dbmfp
-dbminit
-dbmp
-dbname
-dbnamep
-dbobj
-dbopen
-dbp
-dbp's
-dbpp
-dbprep
-dbps
-dbq
-dbrdonly
-dbreg
-dbremove
-dbrename
-dbs
-dbsizes
-dbsrv
-dbt
-dbta
-dbtb
-dbtp
-dbtruncate
-dbts
-dbtxn
-dbtype
-dbuf
-dbverify
-dbx
-dcursor
-dd
-deadfile
-deadlocker
-deadmap
-dec
-decrpyting
-def
-defcmp
-defg
-defpfx
-defto
-del
-deletable
-deleteAction
-delext
-delim
-delimp
-delpg
-denom
-dereffing
-des
-deserialize
-deserialized
-deserializing
-dest
-detectp
-dev
-df
-dh
-diff
-difflen
-dir
-directio
-dirent
-dirf
-dirfno
-dirfree
-dirlist
-dirname
-dirp
-dirs
-dirtyRead
-dist
-dists
-ditem
-dl
-dlbytes
-dlen
-dlfcn
-dll
-dlopen
-dm
-dname
-dndx
-doGet
-doTimeouts
-doWork
-dobj
-docRoot
-doevents
-donext
-doreq
-doubleToLongBits
-dp
-dpagep
-dpages
-dpair
-dpgno
-dr
-dremoved
-ds
-dsearch
-dsize
-dst
-dsync
-dtab
-dtabsize
-dumptree
-dup
-dup'ed
-dupcompare
-duperr
-dupmaster
-dupmasters
-dupped
-dups
-dupset
-dupsort
-duptree
-duptype
-dwNumberOfProcessors
-dx
-eax
-ebuf
-edu
-efg
-efh
-egen
-eid
-eid's
-eidp
-ek
-ele
-electinit
-electsend
-electtally
-electvote
-electwait
-elem
-elp
-emap
-emt
-encrpyting
-encryptaes
-encryptany
-endInclusive
-endKey
-endian
-endianness
-endif
-endname
-endodata
-endofile
-endpath
-enqueuing
-ent
-entityBinding
-entrySet
-entryp
-enum
-enums
-env
-env's
-envFlags
-envcl
-envdata
-envdp
-envid
-envip
-envlock
-envp
-envpanic
-envparent
-envremove
-envrpcserver
-envs
-eof
-eor
-erlangen
-errbuf
-errcall
-errfile
-errlock
-errno
-errnum
-erroring
-errorret
-errpfx
-errunlock
-errx
-esat
-esp
-eval
-exactp
-excl
-exe
-exid
-exnum
-expandtab
-expr
-ext
-extentsize
-extentsizep
-externalizable
-extid
-extractIndexKey
-fN
-faq
-faststat
-faultmem
-fc
-fcchk
-fchk
-fclose
-fcn
-fcntl
-fcreate
-fd
-fd's
-fdatasync
-fdm
-fdp
-fdupcurs
-feedback's
-ferr
-ff
-ffactor
-ffactorp
-fget
-fh
-fhp
-fid
-fids
-fileID
-fileIDs
-filedone
-filehandle
-fileid
-fileids
-fileinfo
-fileinit
-filelist
-filenamep
-filenum
-fileopen
-fileops
-fillf
-finalcount
-findFirst
-finddatum
-findlastckp
-finfo
-firstKey
-firstkey
-fiv
-fixup
-fixups
-flagN
-flagsp
-floatToIntBits
-flushcommit
-fmax
-fmethod
-fn
-fname
-fnl
-fnp
-fns
-fnum
-foo
-fopen
-forName
-foreignStore
-form's
-format's
-formatID
-fp
-fprobe
-fptr
-fput
-fq
-freakin
-free'd
-free'ing
-freeable
-freedata
-freefamilylocker
-freelist
-freelock
-freelocker
-freep
-fremove
-freq
-friedl
-fromInclusive
-fromIndex
-fromKey
-fromMapEntry
-fromValue
-frotzed
-fs
-fset
-fst
-fstat
-fstati
-fsync
-ftruncate
-ftype
-fullhome
-fullname
-func
-funcs
-fv
-fwd
-gbytes
-gbytesp
-gc
-gc'ed
-gcc
-gdb
-gen
-genrand
-george
-getBranchQualifier
-getBytes
-getCity
-getClassFormat
-getCollection
-getCurrentKey
-getData
-getDbEnv
-getDbt
-getDbtString
-getDetail
-getEnvironment
-getErrno
-getFlags
-getFormatId
-getGlobalTransactionId
-getIndex
-getInstance
-getLock
-getMode
-getNext
-getObj
-getObject
-getOffset
-getOp
-getPartialLength
-getPartialOffset
-getPrimaryKeyFormat
-getPrimitiveBinding
-getRecordNumber
-getSize
-getString
-getTimeout
-getUserBufferLength
-getValue
-getValueFormat
-getactive
-getboth
-getbothc
-getckp
-getcwd
-getdata
-getdynamic
-getenv
-getinfo
-getjoin
-getlocker
-getlong
-getname
-getnext
-getno
-getobj
-getopt
-getpageinfo
-getpid
-getrusage
-getstack
-getsubopt
-gettime
-gettimeofday
-gettingstarted
-gettype
-getuid
-getulong
-getval
-getzip
-ghi
-gid
-gotkey
-gotta
-groupalloc
-gsf
-gsp
-gtrid
-guesspgsize
-hEvent
-handle's
-handleException
-happend
-hardcode
-hardcoding
-hasNext
-hasPrevious
-hashCode
-hashhdr
-hashinit
-hashmeta
-hashp
-hashproc
-hc
-hcp
-hcreate
-hdestroy
-hdr
-hdrbuf
-hdrchk
-hdrpages
-hdrs
-headMap
-headSet
-header's
-headp
-heldby
-helloworld
-hf
-hijkl
-himark
-histdbt
-hlock
-hmac
-hmeta
-holdl
-homep
-homeroot
-hostaddr
-hostname
-hotcopy
-hp
-hq
-href
-hs
-hsearch
-htab
-htonl
-httpd
-iX
-ia
-icursor
-idbase
-idletimeout
-idleto
-idmap
-idnum
-idp
-ids
-idspace
-idup
-idup'ed
-iface
-ifdef
-iff
-ifndef
-ihold
-iitem
-ik
-ilock
-ilocks
-inc
-incfirst
-incomp
-incr
-incursor
-indexCursor
-indexKey
-indexKeyData
-indexKeyFormat
-indexKeyOutput
-indexKeys
-indexOf
-indexViews
-indexlist
-indx
-info's
-infop
-informatik
-ini
-init
-init'ing
-inited
-initialSize
-inits
-initspin
-inlen
-inline
-inmem
-inmemdbflags
-inmemory
-ino
-inode
-inorder
-inp
-inpitem
-inputOctets
-inregion
-insdel
-int
-intBitsToFloat
-intValue
-intel
-interp
-intial
-ints
-inttypes
-inuse
-inventoryDB
-inventorydb
-io
-ioinfo
-iopsize
-iosDevFind
-ip
-ipcs
-iread
-isAutoCommit
-isByteLen
-isDirtyReadAllowed
-isDirtyReadEnabled
-isEmpty
-isIndexed
-isOrdered
-isTransactional
-isWriteAllowed
-isbad
-isbigendian
-isdeleted
-isdone
-isdst
-isdup
-isolder
-isopd
-ispget
-isroot
-isspace
-istmp
-isvalid
-isync
-itemname
-itemnameDB
-itemorder
-iter
-ith
-iwr
-iwrite
-java
-java's
-javax
-jbyte
-jc
-jenv
-jhi
-jl
-jlong
-jmsjdbc
-jndi
-journaling
-jp
-jq
-jrpcgen
-jta
-kb
-kbyte
-kbytes
-keio
-key's
-keyAssigner
-keyBinding
-keyClass
-keyExtractor
-keyFormat
-keyInput
-keyInstance
-keyLen
-keyMaterial
-keyName
-keyOutput
-keySet
-keybuf
-keyfirst
-keyflag
-keygroup
-keygroups
-keygrp
-keylast
-keynum
-keyp
-keyrange
-keystr
-kgnum
-ki
-killid
-killinterval
-killiteration
-killtest
-klNpP
-klNprRV
-klNprRs
-klinks
-kow
-kp
-kpv
-krinsky
-ks
-kuleuven
-lM
-lP
-lSN
-lang
-last's
-lastError
-lastIndexOf
-lastKey
-lastfile
-lastid
-lastpgno
-later's
-lbtree
-lbucket
-lc
-ld
-ldata
-ldbp
-ldbt
-ldbtsize
-ldcws
-ldl
-ldstub
-le
-len
-lenp
-les
-lf
-lfhp
-lfname
-lg
-lget
-lh
-lhash
-lhi
-libdb
-libfile
-libname
-libpthread
-libthread
-lineno
-listIterator
-listobj
-listp
-lk
-lkrs
-ll
-lld
-llsn
-llu
-lm
-ln
-lnP
-lnsl
-loadme
-localhost
-localtime
-lockForWrite
-lockGet
-lockVector
-lockcount
-lockdown
-locker's
-lockerid
-lockevent
-lockfhp
-lockid
-lockinfo
-lockmgr
-lockmode
-lockobj
-lockop
-lockreq
-lockstep
-locktimeout
-logbuf
-logc
-logclean
-logdir
-logfile
-logfiles
-logflush
-loggap
-logmaxset
-logmsg
-logrec
-logset
-logsonly
-longBitsToDouble
-lorder
-lorderp
-lowlsn
-lp
-lpgno
-lprint
-lput
-lrand
-lrp
-lru
-lsVv
-lsn
-lsnadd
-lsninit
-lsnoff
-lsnp
-lsynch
-lt
-lu
-luB
-luGB
-luKB
-luKb
-luM
-luMB
-luMb
-lvalue
-lwarx
-lwp
-lx
-mNP
-mNs
-machid
-machtab
-maddr
-magicno
-maintinit
-maj
-majver
-makeKey
-makedup
-malloc
-malloc'd
-malloc'ed
-malloc's
-mallocing
-mallocs
-mapEntry
-mapfile
-margo
-markdone
-markneeded
-markus
-marshalIndexKey
-marshalled
-marshalling
-matumoto
-maxRetries
-maxb
-maxcommitperflush
-maxid
-maxkey
-maxkeypage
-maxlockers
-maxlocks
-maxlsn
-maxn
-maxnactive
-maxnlockers
-maxnlocks
-maxnobjects
-maxobjects
-maxopenfd
-maxops
-maxp
-maxperm
-maxpg
-maxpgno
-maxrec
-maxsites
-maxsize
-maxtimeout
-maxto
-maxtxn
-maxtxns
-maxwrite
-maxwrites
-mb
-mbp
-mbucket
-mbytes
-mbytesp
-md
-mem
-membar
-memcmp
-memcmps
-memcpy
-memmove
-memp
-memset
-metachk
-metadata
-metaflags
-metagroup
-metalsn
-metapage
-metasub
-metaswap
-methodID
-mf
-mfp
-mgrp
-midpage
-millitm
-mincommitperflush
-minkey
-minkeyp
-minkeypage
-minlocks
-minp
-minval
-minver
-minwrite
-minwrites
-mip
-mis
-misc
-mjc
-mkdir
-mkdir's
-mlock
-mmap
-mmap'd
-mmap'ing
-mmapped
-mmapsize
-mmapsizep
-mmetalsn
-mmpgno
-modeFlag
-moremiddle
-mortem
-movl
-mp
-mpf
-mpfarray
-mpfq
-mpgno
-mpip
-mpool
-mpoolfile
-mpools
-mpreg
-mps
-msem
-msemaphore
-msg
-msg's
-msgadd
-msgbuf
-msgcall
-msgdbt
-msgfile
-msgfp
-msgs
-msgtype
-msize
-mswap
-mt
-mti
-munlock
-munmap
-mut
-mutex
-mutexes
-mutexlocks
-mutexp
-muxfile
-mv
-mvptr
-mydrive
-mydrivexxx
-myfree
-mylock
-myobjc
-myval
-n'th
-nO
-nP
-nTV
-nTt
-naborts
-nactive
-nalloc
-namelistp
-nameop
-namep
-namesp
-nargc
-nargv
-nbegins
-nbytes
-ncache
-ncachep
-ncaches
-ncommit
-ncommits
-nconflicts
-ncurs
-ndary
-ndata
-ndbm
-ndeadalloc
-ndeadlocks
-ndir
-ndx
-needswap
-neg
-nelem
-nelemp
-nentries
-nevict
-newalloc
-newclient
-newdata
-newdatabase
-newfh
-newfile
-newitem
-newlist
-newmaster
-newname
-newopd
-newpage
-newpgno
-newsite
-newsites
-newsize
-next's
-nextIndex
-nextdup
-nextents
-nextinfo
-nextkey
-nextlsn
-nextnodup
-nextpgno
-nfid
-nfiles
-ng
-nitems
-nkeys
-nlist
-nlockers
-nlocks
-nlocktimeouts
-nlsn
-nmodes
-nnext
-nnextlsn
-nnowaits
-noWait
-noarchive
-nobjects
-nobuffer
-nodup
-nodupdata
-noet
-nogrant
-nohasham
-nolock
-nolocking
-nolonger
-nomem
-nommap
-noop
-nooverwrite
-nop
-nopanic
-nopenp
-norep
-nosort
-nosync
-nosystemmem
-notdurable
-notfound
-notgranted
-notused
-notzero
-novrfy
-nowait
-nowaits
-np
-npages
-npgno
-nprocs
-nptr
-nr
-nread
-nreaders
-nrec
-nrecords
-nrecs
-nreg
-nreleases
-nrepeat
-nrequests
-nrestores
-nsites
-nsize
-nsl
-nsleep
-nsleepp
-ntasks
-nthreads
-nthrottles
-ntohl
-ntxns
-ntxntimeouts
-nuls
-num
-numberOfKeysRead
-numdup
-numdups
-numext
-numlocks
-nval
-nvotes
-nwrite
-nwritep
-nwriters
-nwrites
-nwrotep
-nxt
-obj
-objc
-objectArrayToString
-objectToData
-objectToKey
-objectToValue
-objp
-objs
-objv
-octets
-offdup
-offp
-offpage
-offsetp
-oflags
-ohash
-ok
-oldValue
-oldValues
-olddata
-olditem
-oldname
-oldrec
-oldsize
-oli
-omniti
-omode
-ondisk
-onefile
-onint
-onoff
-onoffp
-onpage
-op
-opd
-openCursors
-openFlags
-openfd
-openfiles
-openhandle
-opensub
-opflags
-opmods
-ops
-optarg
-opterr
-optind
-optopt
-optreset
-orderchkonly
-org
-orig
-originfo
-origline
-origmap
-origp
-os
-osynch
-outBuffer
-outbuf
-outdatedp
-outfd
-outfile
-outfp
-outlen
-outstr
-ovfl
-ovflok
-ovflpage
-ovflpoint
-ovflsize
-ovput
-ovref
-padDecrypt
-padEncrypt
-padp
-pageimage
-pageinfo
-pagelayout
-pagelist
-pagelsn
-pageno
-pagep
-pagereq
-pagesize
-pagesizep
-pagesizes
-pagespace
-pagetype
-pagezero
-pagf
-pagfno
-panic'd
-panic'ing
-paniccall
-panicstate
-params
-parentid
-parseLong
-partsize
-passwd
-passwds
-paulo
-pct
-pdbp
-pdf
-penv
-perf
-perfdb
-perftool
-perms
-pflag
-pfx
-pg
-pgaddr
-pgcookie
-pgdbp
-pgdbt
-pgerr
-pget
-pgfmt
-pgfree
-pggap
-pgin
-pginfo
-pgip
-pgmax
-pgno
-pgnoadd
-pgnoaddr
-pgnop
-pgnos
-pgnum
-pgout
-pgread
-pgs
-pgset
-pgsize
-pgwrite
-ph
-php
-physdel
-physwrite
-pid
-pids
-pinref
-pinsert
-pitem
-pk
-pkey
-pkey's
-pkeys
-pkg
-placeholder
-plist
-pmap
-pn
-poff
-portmapper
-pos
-postdestroy
-postlog
-postlogmeta
-postopen
-postsync
-pp
-ppc
-pr
-prR
-prdb
-prdbt
-pre
-pread
-prec
-predestroy
-preopen
-preparse
-preplist
-preprocess
-preprocessed
-preread
-prereq
-presorted
-prev
-prev's
-prevfile
-previousIndex
-prevlsn
-prevnodup
-prflags
-prfooter
-prheader
-pri
-primaryKey
-primaryKeyData
-primaryKeyFormat
-primaryKeyInput
-primaryKeyThang
-primget
-printf
-printlock
-printlog
-priorityp
-prnpage
-proc
-procs
-proff
-progname
-progpath
-protos
-prpage
-prqueue
-prtree
-pseudorandom
-psize
-psplit
-pstat
-ptail
-pthread
-pthreads
-ptr
-ptrdiff
-ptype
-putAll
-putall
-putchar
-putitem
-putobj
-putop
-putpageinfo
-putr
-pv
-pwrite
-qV
-qam
-qammeta
-qmeta
-qmpf
-qnx
-qp
-qs
-qtest
-quV
-queuestart
-quicksort
-quotedStr
-rRV
-rRs
-rV
-rand
-randtbl
-rbtree
-rcon
-rcuradj
-rcursor
-rcvd
-rdata
-rdbc
-rdbenv
-rdonly
-rdump
-reacquired
-reacquires
-readBoolean
-readByte
-readBytes
-readChar
-readChars
-readDouble
-readFloat
-readInt
-readLong
-readShort
-readString
-readUnsignedByte
-readUnsignedInt
-readUnsignedShort
-readd
-readdir
-readn
-readonly
-readratio
-realloc
-realloc'd
-realloc'ed
-reallocing
-rec
-recfill
-reclen
-reclength
-recno
-recnop
-recnos
-recnum
-recnums
-recops
-record's
-recordNumber
-recordlen
-recs
-rectype
-recvd
-refcnt
-refcount
-refcounting
-reffed
-regids
-reginfo
-regionmax
-reglocks
-regmutex
-regmutexes
-regop
-regsize
-relen
-relink
-rem
-remevent
-remfile
-remfirst
-remlock
-removeAll
-remrem
-renum
-renv
-rep's
-repdb
-repl
-replication's
-replpair
-replyp
-reppg
-repquote
-repsite
-reput
-reputpair
-req
-resizep
-resync
-retPrimaryKey
-retValue
-retbuf
-retcopy
-retcount
-rethrown
-reties
-retp
-retsp
-retval
-reverseconcat
-reversedata
-revsplitoff
-rf
-rfd
-rfp
-rget
-rheader
-ri
-rijmen
-rijndael
-rijndaelDecrypt
-rijndaelDecryptRound
-rijndaelEncrypt
-rijndaelEncryptRound
-rijndaelKeySetupDec
-rijndaelKeySetupEnc
-ritem
-riw
-rk
-rkey
-rlen
-rlsn
-rlsnp
-rm
-rmdir
-rmdir's
-rmid
-rmw
-ro
-roff
-rollforward
-rootent
-rootlsn
-rp
-rp's
-rpath
-rpc
-rpcgen
-rpcid
-rpcserver
-rprint
-rptr
-rq
-rr
-rrecno
-rs
-rsearch
-rskey
-rsplit
-rtree
-rtxn
-rundb
-runlog
-rusage
-rw
-rwrw
-rwrwrw
-sS
-sV
-sVv
-salloc
-salvager's
-savetime
-sched
-scount
-sdb
-sdbp
-seckey
-secon
-secondary's
-secondaryKeyCreate
-secs
-secsp
-sectorsize
-segdata
-segid
-sema
-semid
-seminfo
-semun
-sendpages
-sendproc
-sep
-seq
-seqnum
-seqp
-serialobj
-servlet
-setAppDispatch
-setAppendRecno
-setBtreeCompare
-setBtreePrefix
-setCacheSize
-setData
-setDuplicatelicateCompare
-setEncrypted
-setErrorHandler
-setErrorPrefix
-setFeedback
-setFeedbackHandler
-setFlags
-setHash
-setLock
-setMode
-setObj
-setObject
-setOffset
-setOp
-setPanicHandler
-setPartialLength
-setPartialOffset
-setRecno
-setRecordNumber
-setReplicationLimit
-setReplicationTransport
-setSize
-setTimeout
-setUserBufferLength
-setValue
-setflags
-setid
-setlsn
-settimeout
-setto
-setval
-sexing
-sgenrand
-sh
-shalloc
-shalloc'ed
-shalloced
-sharedb
-shareenv
-shash
-shm
-shmat
-shmctl
-shmdt
-shmem
-shmget
-shmname
-shortread
-shownull
-shqueue
-shr
-shreg
-siginit
-signo
-sigresend
-singleKey
-sizeAdded
-sizeNeeded
-sizefix
-sizeof
-sj
-skiplist
-skiplists
-skodonj
-sl
-sle
-sleepycat
-slh
-slumber'd
-smap
-smax
-snapshotting
-sniglet
-snprintf
-sortdups
-sourcep
-sp
-sparc
-spawnl
-spinlock
-spinlocks
-spinsp
-splitdata
-splitmeta
-splitp
-sprintf
-srand
-srandom
-src
-sread
-ss
-sscanf
-sse
-sshift
-ssize
-sslll
-sss
-stat
-stati
-stats
-stbar
-std
-stddev
-stderr
-stdfd
-stdin
-stdint
-stdlib
-stdmode
-stdout
-stkgrow
-stkrel
-stl
-storedCollection
-storedIterator
-storedList
-storedMap
-storedSet
-storedSortedMap
-storedSortedSet
-stqe
-stqh
-str
-strcasecmp
-strcmp
-strdup
-strdup'ed
-strerror
-stringToBytes
-stringp
-strlen
-strncasecmp
-strncmp
-strsep
-strtol
-strtoul
-struct
-structs
-structure's
-sts
-stwcx
-subList
-subMap
-subSet
-subcases
-subclassed
-subdatabase
-subdatabase's
-subdb
-subdbname
-subdbpg
-subdbs
-subdistribution
-subdistributions
-submap
-subname
-subpackages
-subtransaction
-subtransactions
-sullivan
-sv
-svc
-sw
-swigCPtr
-swpb
-sync'd
-sync'ed
-synced
-syncs
-sysattach
-sysconf
-sysdetach
-sz
-t's
-tV
-tVZ
-tableent
-tablesize
-tailMap
-tailSet
-tailq
-tas
-taskLock
-tc
-tcl
-tcp
-td
-tdata
-tearDown
-terra
-testName
-testcopy
-testdata
-testdestdir
-testdigits
-testdocopy
-tffsp
-tfsp
-thang
-theVendor
-thies
-thr
-thread's
-threadID
-threadedness
-tid
-tids
-tiebreaker
-tiebreaking
-timeoutp
-timestamp
-timeval
-timout
-timouts
-tlen
-tm
-tmap
-tmax
-tmp
-tmpdir
-tmpmap
-tmpname
-tmutex
-tnum
-toArray
-toBuf
-toHexString
-toInclusive
-toIndex
-toKey
-toList
-toMapEntry
-toString
-toValue
-toched
-todo
-toobig
-tp
-tpcb
-tput
-tqe
-tqh
-tr
-transport's
-treeorder
-tregion
-trinomials
-trunc
-truncdata
-ts
-tsl
-tstart
-ttpcbddlk
-ttpcbi
-ttpcbr
-ttype
-tv
-tx
-txn
-txnal
-txnapp
-txnarray
-txnid
-txnidcl
-txnids
-txnip
-txnlist
-txnp
-txns
-txntimeout
-txt
-ua
-ubell
-ud
-udbt
-ufid
-ufree
-uid
-uintmax
-uintptr
-ul
-ulen
-ulens
-ulinks
-umalloc
-uncorrect
-undef
-undeleting
-undo'ing
-undodup
-undosplit
-uni
-unicode
-unindexed
-uniq
-unistd
-unix
-unmap
-unmapfile
-unmark
-unmarshalData
-unmarshalled
-unpinned
-unpinning
-unref
-unregistry
-upd
-updateDatabaseEntry
-updateDbt
-updateckp
-upg
-upi
-urealloc
-useCurrentKey
-usePrimaryKey
-useValue
-usec
-usecs
-usecsp
-usermem
-usr
-usrAppInit
-util
-vVxXZ
-vVxXyZ
-vZ
-va
-val
-value's
-valueBinding
-valueData
-valueEntityBinding
-valueFormat
-valueInput
-valueInputOutput
-valueOf
-valueOutput
-var
-variadic
-vars
-vdp
-vdp's
-vec
-vendorDB
-vendordb
-ver
-verbage
-vflag
-vica
-view's
-vrfy
-vrfyutil
-vsnprintf
-vsprintf
-vtruncate
-vw
-vx
-vxmutex
-vxtmp
-vxtpcb
-vxworks
-wDay
-wHour
-wMinute
-wMonth
-wSecond
-wYear
-waitl
-waitlist
-waitsfor
-walkdupint
-walkpages
-walkqueue
-wb
-wc
-wcount
-weblogic
-weblogic's
-webquill
-windsh
-winnt
-wmask
-wnt
-wordlist
-workcurs
-writeAllowed
-writeBoolean
-writeByte
-writeBytes
-writeChar
-writeChars
-writeCursor
-writeDouble
-writeFloat
-writeInt
-writeLong
-writeShort
-writeString
-writeUnsignedByte
-writeUnsignedInt
-writeUnsignedShort
-writeable
-writeback
-writelock
-writelocks
-wrlock
-wrnosync
-wsize
-wt
-wthread
-xa
-xact
-xalinks
-xchg
-xchgb
-xdr
-xid
-xids
-xml
-xor
-xorl
-xxx
-xyz
-yieldcpu
-zend
-zero'd
-zeroeth
-zerofill
-zipcode
-zl
diff --git a/storage/bdb/test/scr011/chk.tags b/storage/bdb/test/scr011/chk.tags
deleted file mode 100644
index f1d680ac7d7..00000000000
--- a/storage/bdb/test/scr011/chk.tags
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.tags,v 1.14 2004/10/07 20:30:32 bostic Exp $
-#
-# Check to make sure we don't need any more symbolic links to tags files.
-
-d=../..
-
-# Test must be run from the top-level directory, not from a test directory.
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-t1=__1
-t2=__2
-
-(cd $d && ls -F | egrep / | sort |
- sed -e 's/\///' \
- -e '/^CVS$/d' \
- -e '/^build_vxworks$/d' \
- -e '/^build_win32$/d' \
- -e '/^build_win64$/d' \
- -e '/^docs$/d' \
- -e '/^docs_book$/d' \
- -e '/^docs_src$/d' \
- -e '/^examples_java$/d' \
- -e '/^java$/d' \
- -e '/^mod_db4$/d' \
- -e '/^perl$/d' \
- -e '/^php_db4$/d' \
- -e '/^test$/d' \
- -e '/^test_cxx$/d' \
- -e '/^test_purify$/d' \
- -e '/^test_server$/d' \
- -e '/^test_thread$/d' \
- -e '/^test_vxworks$/d') > $t1
-
-(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2
-if diff $t1 $t2 > /dev/null; then
- exit 0
-else
- echo "<<< source tree >>> tags files"
- diff $t1 $t2
- exit 1
-fi
diff --git a/storage/bdb/test/scr012/chk.vx_code b/storage/bdb/test/scr012/chk.vx_code
deleted file mode 100644
index 8b7916053ae..00000000000
--- a/storage/bdb/test/scr012/chk.vx_code
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.vx_code,v 1.7 2004/09/16 17:21:11 bostic Exp $
-#
-# Check to make sure the auto-generated utility code in the VxWorks build
-# directory compiles.
-
-d=../..
-
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
- echo 'FAIL: unable to find or build libdb.a'
- exit 1
-}
-
-rm -f t.c t1.c t2.c
-
-F="$d/clib/getopt.c $d/common/util_arg.c $d/common/util_cache.c
- $d/common/util_log.c $d/common/util_sig.c $d/*/*_autop.c"
-
-header()
-{
- echo "int"
- echo "main(int argc, char *argv[])"
- echo "{return ($1(argv[1]));}"
-}
-
-(echo "int"
- echo "main(int argc, char *argv[])"
- echo "{"
- echo "int i;") > t1.c
-
-for i in db_archive db_checkpoint db_deadlock db_dump db_load \
- db_printlog db_recover db_stat db_upgrade db_verify dbdemo; do
- echo " compiling build_vxworks/$i"
- (cat $d/build_vxworks/$i/$i.c; header $i) > t.c
- if cc -Wall -I.. -I$d t.c $F ../libdb.a -o t; then
- :
- else
- echo "FAIL: unable to compile $i"
- exit 1
- fi
-
- cat $d/build_vxworks/$i/$i.c >> t2.c
- echo "i = $i(argv[1]);" >> t1.c
-done
-
-(cat t2.c t1.c; echo "return (0); }") > t.c
-
-echo " compiling build_vxworks utility composite"
-if cc -Dlint -Wall -I.. -I$d t.c $F ../libdb.a -o t; then
- :
-else
- echo "FAIL: unable to compile utility composite"
- exit 1
-fi
-
-exit 0
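-
-# For reference, the per-utility stub that header() emits above
-# (reconstructed here from its echo statements, using db_archive as
-# the utility name) looks like:
-#
-#	int
-#	main(int argc, char *argv[])
-#	{return (db_archive(argv[1]));}
-#
-# The stub is concatenated after the utility source, which the
-# VxWorks build is assumed to provide with an entry point named
-# after the utility.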
diff --git a/storage/bdb/test/scr013/chk.stats b/storage/bdb/test/scr013/chk.stats
deleted file mode 100644
index 7ee71392e1c..00000000000
--- a/storage/bdb/test/scr013/chk.stats
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.stats,v 1.9 2004/11/08 14:49:42 bostic Exp $
-#
-# Check to make sure all of the stat structure members are included in
-# all of the possible formats.
-
-# Top-level directory.
-d=../..
-docs=../../../db_docs
-
-# Path names are from a top-level directory.
-[ -f $d/README ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-exitv=0
-t=__tmp
-
-# Extract the field names for a structure from the db.h file.
-inc_fields()
-{
- sed -e "/struct $1 {/,/^};$/p" \
- -e d < $d/dbinc/db.in |
- sed -e 1d \
- -e '$d' \
- -e '/;/!d' \
- -e 's/;.*//' \
- -e 's/^[ ].*[ \*]//'
-}
-
-cat << END_OF_IGNORE > IGNORE
-bt_maxkey
-bt_metaflags
-hash_metaflags
-qs_metaflags
-qs_ndata
-END_OF_IGNORE
-
-# Check to make sure the elements of a structure from db.h appear in
-# the other files.
-inc()
-{
- for i in `inc_fields $1`; do
- if egrep -w $i IGNORE > /dev/null; then
- echo " $1: ignoring $i"
- continue
- fi
- for j in $2; do
- if egrep -w $i $j > /dev/null; then
- :;
- else
- echo " $1: $i not found in $j."
- exitv=1
- fi
- done
- done
-}
-
-inc "__db_bt_stat" "$d/tcl/tcl_db.c $d/btree/bt_stat.c $docs/db/db_stat.so"
-inc "__db_h_stat" "$d/tcl/tcl_db.c $d/hash/hash_stat.c $docs/db/db_stat.so"
-inc __db_lock_stat \
- "$d/tcl/tcl_lock.c $d/lock/lock_stat.c $docs/lock/lock_stat.so"
-inc __db_log_stat "$d/tcl/tcl_log.c $d/log/log_stat.c $docs/log/log_stat.so"
-inc __db_mpool_fstat \
- "$d/tcl/tcl_mp.c $d/mp/mp_stat.c $docs/memp/memp_stat.so"
-inc __db_mpool_stat \
- "$d/tcl/tcl_mp.c $d/mp/mp_stat.c $docs/memp/memp_stat.so"
-inc "__db_qam_stat" \
- "$d/tcl/tcl_db.c $d/qam/qam_stat.c $docs/db/db_stat.so"
-inc __db_rep_stat \
- "$d/tcl/tcl_rep.c $d/rep/rep_stat.c $docs/rep/rep_stat.so"
-inc __db_seq_stat \
- "$d/tcl/tcl_seq.c $d/sequence/seq_stat.c $docs/seq/seq_stat.so"
-inc __db_txn_stat \
- "$d/tcl/tcl_txn.c $d/txn/txn_stat.c $docs/txn/txn_stat.so"
-
-# Check to make sure the elements from a man page appear in db.in.
-man()
-{
- for i in `cat $t`; do
- if egrep -w $i IGNORE > /dev/null; then
- echo " $1: ignoring $i"
- continue
- fi
- if egrep -w $i $d/dbinc/db.in > /dev/null; then
- :;
- else
- echo " $1: $i not found in db.h."
- exitv=1
- fi
- done
-}
-
-sed -e '/m4_field(/!d' \
- -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/db/db_stat.so > $t
-man "checking db_stat.so against db.h"
-
-sed -e '/m4_field(/!d' \
- -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/lock/lock_stat.so > $t
-man "checking lock_stat.so against db.h"
-
-sed -e '/m4_field(/!d' \
- -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/log/log_stat.so > $t
-man "checking log_stat.so against db.h"
-
-sed -e '/m4_field(/!d' \
- -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/memp/memp_stat.so > $t
-man "checking memp_stat.so against db.h"
-
-sed -e '/m4_field(/!d' \
- -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/rep/rep_stat.so > $t
-man "checking rep_stat.so against db.h"
-
-sed -e '/m4_field(/!d' \
- -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/seq/seq_stat.so > $t
-man "checking seq_stat.so against db.h"
-
-sed -e '/m4_field(/!d' \
- -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' \
- -e 's/__LB__.*//' < $docs/txn/txn_stat.so > $t
-man "checking txn_stat.so against db.h"
-
-exit $exitv
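-
-# Worked example of the extraction (fragment illustrative): given
-# this piece of dbinc/db.in,
-#
-#	struct __db_bt_stat {
-#		u_int32_t bt_magic;	/* Magic number. */
-#		u_int32_t bt_version;	/* Version number. */
-#	};
-#
-# "inc_fields __db_bt_stat" prints bt_magic and bt_version, and
-# inc() then requires each name to appear in the Tcl code, the
-# stat source file, and the documentation page.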
diff --git a/storage/bdb/test/scr014/chk.err b/storage/bdb/test/scr014/chk.err
deleted file mode 100644
index 72b4a62719f..00000000000
--- a/storage/bdb/test/scr014/chk.err
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.err,v 1.3 2002/03/27 04:33:05 bostic Exp $
-#
-# Check to make sure all of the error values have corresponding error
-# message strings in db_strerror().
-
-# Top-level directory.
-d=../..
-
-# Path names are from a top-level directory.
-[ -f $d/README ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-t1=__tmp1
-t2=__tmp2
-
-egrep -- "define.*DB_.*-309" $d/dbinc/db.in | awk '{print $2}' > $t1
-sed -e '/^db_strerror/,/^}/{' \
- -e '/ case DB_/{' \
- -e 's/:.*//' \
- -e 's/.* //' \
- -e p \
- -e '}' \
- -e '}' \
- -e d \
- < $d/common/db_err.c > $t2
-
-cmp $t1 $t2 > /dev/null ||
-(echo "<<< db.h >>> db_strerror" && diff $t1 $t2 && exit 1)
-
-exit 0
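-
-# Example of the pairing this script enforces (value and message
-# text illustrative): a define in dbinc/db.in such as
-#
-#	#define	DB_NOSERVER	(-30991)
-#
-# must have a matching case in db_strerror() in common/db_err.c:
-#
-#	case DB_NOSERVER:
-#		return ("DB_NOSERVER: illustrative message text");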
diff --git a/storage/bdb/test/scr015/README b/storage/bdb/test/scr015/README
deleted file mode 100644
index 75a356eea06..00000000000
--- a/storage/bdb/test/scr015/README
+++ /dev/null
@@ -1,36 +0,0 @@
-# $Id: README,v 1.1 2001/05/31 23:09:11 dda Exp $
-
-Use the scripts testall or testone to run all, or just one of the C++
-tests. You must be in this directory to run them. For example,
-
- $ export LIBS="-L/usr/include/BerkeleyDB/lib"
- $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include"
- $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib"
- $ ./testone TestAppendRecno
- $ ./testall
-
-The scripts will use the c++ in your path. Set the environment
-variable $CXX to override this. They will also honor any $CXXFLAGS
-and $LIBS variables that are set, except that any -c option is
-silently removed from $CXXFLAGS (since we do the compilation in one
-step).
-
-To run successfully, you will probably need to set $LD_LIBRARY_PATH
-to be the directory containing libdb_cxx-X.Y.so.
-
-As an alternative, use the --prefix=<DIR> option, a la configure,
-to set the top of the BerkeleyDB install directory. This forces
-the proper options to be added to $LIBS, $CXXFLAGS and
-$LD_LIBRARY_PATH.
-For example,
-
- $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
- $ ./testall --prefix=/usr/include/BerkeleyDB
-
-The test framework is pretty simple. Any <name>.cpp file in this
-directory that is not mentioned in the 'ignore' file represents a
-test. If the test does not compile successfully, the compiler output
-is left in <name>.compileout. Otherwise, the program is run in
-a clean subdirectory using <name>.testin as input, or /dev/null if
-that doesn't exist. Output and error from the test run are put into
-<name>.out and <name>.err. If <name>.testout and <name>.testerr
-exist, they are used as reference files and any differences are
-reported. If either of the reference files does not exist, /dev/null
-is used.
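-
-For illustration only, a minimal test under these conventions might
-be a (hypothetical) file Hello.cpp:
-
-	#include <iostream.h>
-
-	int main(int argc, char *argv[])
-	{
-		// The harness compares stdout against Hello.testout
-		// if that file exists.
-		cout << "hello\n";
-		return 0;
-	}
-
-together with a one-line Hello.testout containing "hello".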
diff --git a/storage/bdb/test/scr015/TestConstruct01.cpp b/storage/bdb/test/scr015/TestConstruct01.cpp
deleted file mode 100644
index 0b0495ce9ce..00000000000
--- a/storage/bdb/test/scr015/TestConstruct01.cpp
+++ /dev/null
@@ -1,323 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestConstruct01.cpp,v 1.8 2004/01/28 03:36:33 bostic Exp $
- */
-
-/*
- * Do some regression tests for constructors.
- * Run normally (without arguments), it is a simple regression test.
- * Run with a numeric argument, it repeats the regression that many
- * times, to try to determine whether there are memory leaks.
- */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <iostream.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#ifndef _MSC_VER
-#include <unistd.h>
-#endif
-#endif
-
-#include <iomanip.h>
-#include <db_cxx.h>
-
-#define ERR(a) \
- do { \
- cout << "FAIL: " << (a) << "\n"; sysexit(1); \
- } while (0)
-
-#define ERR2(a1,a2) \
- do { \
- cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \
- } while (0)
-
-#define ERR3(a1,a2,a3) \
- do { \
- cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \
- } while (0)
-
-#define CHK(a) \
- do { \
- int _ret; \
- if ((_ret = (a)) != 0) { \
- ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \
- } \
- } while (0)
-
-#ifdef VERBOSE
-#define DEBUGOUT(a) cout << a << "\n"
-#else
-#define DEBUGOUT(a)
-#endif
-
-#define CONSTRUCT01_DBNAME "construct01.db"
-#define CONSTRUCT01_DBDIR "."
-#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME)
-
-int itemcount; // count the number of items in the database
-
-// A good place to put a breakpoint...
-//
-void sysexit(int status)
-{
- exit(status);
-}
-
-void check_file_removed(const char *name, int fatal)
-{
- unlink(name);
-#if 0
- if (access(name, 0) == 0) {
- if (fatal)
- cout << "FAIL: ";
- cout << "File \"" << name << "\" still exists after run\n";
- if (fatal)
- sysexit(1);
- }
-#endif
-}
-
-// Check that key/data for 0 - count-1 are already present,
-// and write a key/data for count. The key and data are
-// both "0123...N" where N == count-1.
-//
-// For some reason on Windows, we need to open using the full pathname
-// of the file when there is no environment, thus the 'has_env'
-// variable.
-//
-void rundb(Db *db, int count, int has_env)
-{
- const char *name;
-
- if (has_env)
- name = CONSTRUCT01_DBNAME;
- else
- name = CONSTRUCT01_DBFULLPATH;
-
- db->set_error_stream(&cerr);
-
- // We don't really care about the pagesize, but we do want
- // to make sure adjusting Db specific variables works before
- // opening the db.
- //
- CHK(db->set_pagesize(1024));
- CHK(db->open(NULL, name, NULL, DB_BTREE, count ? 0 : DB_CREATE, 0664));
-
- // The bit map of keys we've seen
- long bitmap = 0;
-
- // The bit map of keys we expect to see
- long expected = (1 << (count+1)) - 1;
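-	// (One bit for each key length 0..count; e.g., count == 3
-	// gives expected == 0xF.)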
-
- char outbuf[10];
- int i;
- for (i=0; i<count; i++) {
- outbuf[i] = '0' + i;
- }
- outbuf[i++] = '\0';
- Dbt key(outbuf, i);
- Dbt data(outbuf, i);
-
- DEBUGOUT("Put: " << outbuf);
- CHK(db->put(0, &key, &data, DB_NOOVERWRITE));
-
- // Acquire a cursor for the table.
- Dbc *dbcp;
- CHK(db->cursor(NULL, &dbcp, 0));
-
- // Walk through the table, checking
- Dbt readkey;
- Dbt readdata;
- while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) {
- char *key_string = (char *)readkey.get_data();
- char *data_string = (char *)readdata.get_data();
- DEBUGOUT("Got: " << key_string << ": " << data_string);
- int len = strlen(key_string);
- long bit = (1 << len);
- if (len > count) {
- ERR("reread length is bad");
- }
- else if (strcmp(data_string, key_string) != 0) {
- ERR("key/data don't match");
- }
- else if ((bitmap & bit) != 0) {
- ERR("key already seen");
- }
- else if ((expected & bit) == 0) {
- ERR("key was not expected");
- }
- else {
- bitmap |= bit;
- expected &= ~(bit);
- for (i=0; i<len; i++) {
- if (key_string[i] != ('0' + i)) {
- cout << " got " << key_string
- << " (" << (int)key_string[i] << ")"
- << ", wanted " << i
- << " (" << (int)('0' + i) << ")"
- << " at position " << i << "\n";
- ERR("key is corrupt");
- }
- }
- }
- }
- if (expected != 0) {
- cout << " expected more keys, bitmap is: " << expected << "\n";
- ERR("missing keys in database");
- }
- CHK(dbcp->close());
- CHK(db->close(0));
-}
-
-void t1(int except_flag)
-{
- cout << " Running test 1:\n";
- Db db(0, except_flag);
- rundb(&db, itemcount++, 0);
- cout << " finished.\n";
-}
-
-void t2(int except_flag)
-{
- cout << " Running test 2:\n";
- Db db(0, except_flag);
- rundb(&db, itemcount++, 0);
- cout << " finished.\n";
-}
-
-void t3(int except_flag)
-{
- cout << " Running test 3:\n";
- Db db(0, except_flag);
- rundb(&db, itemcount++, 0);
- cout << " finished.\n";
-}
-
-void t4(int except_flag)
-{
- cout << " Running test 4:\n";
- DbEnv env(except_flag);
- CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
- Db db(&env, 0);
- CHK(db.close(0));
- CHK(env.close(0));
- cout << " finished.\n";
-}
-
-void t5(int except_flag)
-{
- cout << " Running test 5:\n";
- DbEnv env(except_flag);
- CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
- Db db(&env, 0);
- rundb(&db, itemcount++, 1);
- // Note we cannot reuse the old Db!
- Db anotherdb(&env, 0);
-
- anotherdb.set_errpfx("test5");
- rundb(&anotherdb, itemcount++, 1);
- CHK(env.close(0));
- cout << " finished.\n";
-}
-
-void t6(int except_flag)
-{
- cout << " Running test 6:\n";
-
- /* From user [#2939] */
- int err;
-
- DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
- penv->set_cachesize(0, 32 * 1024, 0);
- penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0);
-
- //LEAK: remove this block and leak disappears
- Db* pdb = new Db(penv,0);
- if ((err = pdb->close(0)) != 0) {
- fprintf(stderr, "Error closing Db: %s\n", db_strerror(err));
- }
- delete pdb;
- //LEAK: remove this block and leak disappears
-
- if ((err = penv->close(0)) != 0) {
- fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err));
- }
- delete penv;
-
- cout << " finished.\n";
-}
-
-// remove any existing environment or database
-void removeall()
-{
- {
- DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS);
- (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE);
- }
-
- check_file_removed(CONSTRUCT01_DBFULLPATH, 1);
- for (int i=0; i<8; i++) {
- char buf[20];
- sprintf(buf, "__db.00%d", i);
- check_file_removed(buf, 1);
- }
-}
-
-int doall(int except_flag)
-{
- itemcount = 0;
- try {
- // before and after the run, removing any
- // old environment/database.
- //
- removeall();
- t1(except_flag);
- t2(except_flag);
- t3(except_flag);
- t4(except_flag);
- t5(except_flag);
- t6(except_flag);
-
- removeall();
- return 0;
- }
- catch (DbException &dbe) {
- ERR2("EXCEPTION RECEIVED", dbe.what());
- }
- return 1;
-}
-
-int main(int argc, char *argv[])
-{
- int iterations = 1;
- if (argc > 1) {
- iterations = atoi(argv[1]);
- if (iterations < 0) {
- ERR("Usage: construct01 count");
- }
- }
- for (int i=0; i<iterations; i++) {
- if (iterations != 0) {
- cout << "(" << i << "/" << iterations << ") ";
- }
- cout << "construct01 running:\n";
- if (doall(DB_CXX_NO_EXCEPTIONS) != 0) {
- ERR("SOME TEST FAILED FOR NO-EXCEPTION TEST");
- }
- else if (doall(0) != 0) {
- ERR("SOME TEST FAILED FOR EXCEPTION TEST");
- }
- else {
- cout << "\nALL TESTS SUCCESSFUL\n";
- }
- }
- return 0;
-}
diff --git a/storage/bdb/test/scr015/TestConstruct01.testerr b/storage/bdb/test/scr015/TestConstruct01.testerr
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/storage/bdb/test/scr015/TestConstruct01.testerr
+++ /dev/null
diff --git a/storage/bdb/test/scr015/TestConstruct01.testout b/storage/bdb/test/scr015/TestConstruct01.testout
deleted file mode 100644
index 9b840f9fcf4..00000000000
--- a/storage/bdb/test/scr015/TestConstruct01.testout
+++ /dev/null
@@ -1,27 +0,0 @@
-(0/1) construct01 running:
- Running test 1:
- finished.
- Running test 2:
- finished.
- Running test 3:
- finished.
- Running test 4:
- finished.
- Running test 5:
- finished.
- Running test 6:
- finished.
- Running test 1:
- finished.
- Running test 2:
- finished.
- Running test 3:
- finished.
- Running test 4:
- finished.
- Running test 5:
- finished.
- Running test 6:
- finished.
-
-ALL TESTS SUCCESSFUL
diff --git a/storage/bdb/test/scr015/TestExceptInclude.cpp b/storage/bdb/test/scr015/TestExceptInclude.cpp
deleted file mode 100644
index 28bc498222f..00000000000
--- a/storage/bdb/test/scr015/TestExceptInclude.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestExceptInclude.cpp,v 1.4 2002/07/05 22:17:59 dda Exp $
- */
-
-/* We should be able to include cxx_except.h without db_cxx.h,
- * and use the DbException class. We do need db.h to get a few
- * typedefs defined that the DbException classes use.
- *
- * This program does nothing; it's just here to make sure
- * the compilation works.
- */
-#include <db.h>
-#include <cxx_except.h>
-
-int main(int argc, char *argv[])
-{
- DbException *dbe = new DbException("something");
- DbMemoryException *dbme = new DbMemoryException("anything");
-
- dbe = dbme;
-}
-
diff --git a/storage/bdb/test/scr015/TestGetSetMethods.cpp b/storage/bdb/test/scr015/TestGetSetMethods.cpp
deleted file mode 100644
index 1d896766db3..00000000000
--- a/storage/bdb/test/scr015/TestGetSetMethods.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestGetSetMethods.cpp,v 1.6 2004/01/28 03:36:33 bostic Exp $
- */
-
-/*
- * Do some regression tests for simple get/set access methods
- * on DbEnv, DbTxn, Db. We don't currently test that they have
- * the desired effect, only that they operate and return correctly.
- */
-
-#include <db_cxx.h>
-#include <iostream.h>
-
-int main(int argc, char *argv[])
-{
- try {
- DbEnv *dbenv = new DbEnv(0);
- DbTxn *dbtxn;
- u_int8_t conflicts[10];
-
- dbenv->set_error_stream(&cerr);
- dbenv->set_timeout(0x90000000,
- DB_SET_LOCK_TIMEOUT);
- dbenv->set_lg_bsize(0x1000);
- dbenv->set_lg_dir(".");
- dbenv->set_lg_max(0x10000000);
- dbenv->set_lg_regionmax(0x100000);
- dbenv->set_lk_conflicts(conflicts, sizeof(conflicts));
- dbenv->set_lk_detect(DB_LOCK_DEFAULT);
- // exists, but is deprecated:
- // dbenv->set_lk_max(0);
- dbenv->set_lk_max_lockers(100);
- dbenv->set_lk_max_locks(10);
- dbenv->set_lk_max_objects(1000);
- dbenv->set_mp_mmapsize(0x10000);
- dbenv->set_tas_spins(1000);
-
- // Need to open the environment so we
- // can get a transaction.
- //
- dbenv->open(".", DB_CREATE | DB_INIT_TXN |
- DB_INIT_LOCK | DB_INIT_LOG |
- DB_INIT_MPOOL,
- 0644);
-
- dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
- dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT);
- dbtxn->abort();
-
- dbenv->close(0);
-
- // We get a db, one for each type.
- // That's because once we call (for instance)
- // set_bt_maxkey, DB 'knows' that this is a
- // Btree Db, and it cannot be used to try Hash
- // or Recno functions.
- //
- Db *db_bt = new Db(NULL, 0);
- db_bt->set_bt_maxkey(10000);
- db_bt->set_bt_minkey(100);
- db_bt->set_cachesize(0, 0x100000, 0);
- db_bt->close(0);
-
- Db *db_h = new Db(NULL, 0);
- db_h->set_h_ffactor(0x10);
- db_h->set_h_nelem(100);
- db_h->set_lorder(0);
- db_h->set_pagesize(0x10000);
- db_h->close(0);
-
- Db *db_re = new Db(NULL, 0);
- db_re->set_re_delim('@');
- db_re->set_re_pad(10);
- db_re->set_re_source("re.in");
- db_re->close(0);
-
- Db *db_q = new Db(NULL, 0);
- db_q->set_q_extentsize(200);
- db_q->close(0);
-
- }
- catch (DbException &dbe) {
- cerr << "Db Exception: " << dbe.what() << "\n";
- }
- return 0;
-}
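-
-/*
- * A possible extension (sketch only, not part of this test): verify
- * a setting through the corresponding get method, e.g.
- *
- *	u_int32_t bsize;
- *	dbenv->set_lg_bsize(0x1000);
- *	dbenv->get_lg_bsize(&bsize);	// assumes the get_* API (DB 4.2+)
- *	if (bsize != 0x1000)
- *		cerr << "set_lg_bsize was not honored\n";
- */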
diff --git a/storage/bdb/test/scr015/TestKeyRange.cpp b/storage/bdb/test/scr015/TestKeyRange.cpp
deleted file mode 100644
index d875cb20b81..00000000000
--- a/storage/bdb/test/scr015/TestKeyRange.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestKeyRange.cpp,v 1.6 2004/01/28 03:36:33 bostic Exp $
- */
-
-/*
- * NOTE: AccessExample changed to test Db.key_range. We made a global
- * change of /AccessExample/TestKeyRange/; the only other changes are
- * marked with comments notated as 'ADDED'.
- */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <iostream.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#ifndef _MSC_VER
-#include <unistd.h>
-#endif
-#endif
-
-#include <iomanip.h>
-#include <db_cxx.h>
-
-class TestKeyRange
-{
-public:
- TestKeyRange();
- void run();
-
-private:
- static const char FileName[];
-
- // no need for copy and assignment
- TestKeyRange(const TestKeyRange &);
- void operator = (const TestKeyRange &);
-};
-
-static void usage(); // forward
-
-int main(int argc, char *argv[])
-{
- if (argc > 1) {
- usage();
- }
-
- // Use a try block just to report any errors.
- // An alternate approach to using exceptions is to
- // use error models (see DbEnv::set_error_model()) so
- // that error codes are returned for all Berkeley DB methods.
- //
- try {
- TestKeyRange app;
- app.run();
- return 0;
- }
- catch (DbException &dbe) {
- cerr << "TestKeyRange: " << dbe.what() << "\n";
- return 1;
- }
-}
-
-static void usage()
-{
- cerr << "usage: TestKeyRange\n";
- exit(1);
-}
-
-const char TestKeyRange::FileName[] = "access.db";
-
-TestKeyRange::TestKeyRange()
-{
-}
-
-void TestKeyRange::run()
-{
- // Remove the previous database.
- (void)unlink(FileName);
-
- // Create the database object.
- // There is no environment for this simple example.
- Db db(0, 0);
-
- db.set_error_stream(&cerr);
- db.set_errpfx("TestKeyRange");
- db.set_pagesize(1024); /* Page size: 1K. */
- db.set_cachesize(0, 32 * 1024, 0);
- db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
-
- //
- // Insert records into the database, where the key is the user
- // input and the data is the user input in reverse order.
- //
- char buf[1024];
- char rbuf[1024];
- char *t;
- char *p;
- int ret;
- int len;
- Dbt *firstkey = NULL;
- char firstbuf[1024];
-
- for (;;) {
- cout << "input>";
- cout.flush();
-
- cin.getline(buf, sizeof(buf));
- if (cin.eof())
- break;
-
- if ((len = strlen(buf)) <= 0)
- continue;
- for (t = rbuf, p = buf + (len - 1); p >= buf;)
- *t++ = *p--;
- *t++ = '\0';
-
- Dbt key(buf, len + 1);
- Dbt data(rbuf, len + 1);
- if (firstkey == NULL) {
- strcpy(firstbuf, buf);
- firstkey = new Dbt(firstbuf, len + 1);
- }
-
- ret = db.put(0, &key, &data, DB_NOOVERWRITE);
- if (ret == DB_KEYEXIST) {
- cout << "Key " << buf << " already exists.\n";
- }
- cout << "\n";
- }
-
- // We put a try block around this section of code
- // to ensure that our database is properly closed
- // in the event of an error.
- //
- try {
- // Acquire a cursor for the table.
- Dbc *dbcp;
- db.cursor(NULL, &dbcp, 0);
-
- /*ADDED...*/
- DB_KEY_RANGE range;
- memset(&range, 0, sizeof(range));
-
- db.key_range(NULL, firstkey, &range, 0);
- printf("less: %f\n", range.less);
- printf("equal: %f\n", range.equal);
- printf("greater: %f\n", range.greater);
- /*end ADDED*/
-
- Dbt key;
- Dbt data;
-
- // Walk through the table, printing the key/data pairs.
- while (dbcp->get(&key, &data, DB_NEXT) == 0) {
- char *key_string = (char *)key.get_data();
- char *data_string = (char *)data.get_data();
- cout << key_string << " : " << data_string << "\n";
- }
- dbcp->close();
- }
- catch (DbException &dbe) {
- cerr << "TestKeyRange: " << dbe.what() << "\n";
- }
-
- db.close(0);
-}
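-
-/*
- * Worked example (matches TestKeyRange.testout): with the eight keys
- * from TestKeyRange.testin loaded, the first key inserted ("first
- * line is alphabetically somewhere in the middle.") sorts fourth of
- * eight, so key_range reports less = 3/8 = 0.375, equal = 1/8 =
- * 0.125 and greater = 4/8 = 0.5; the three fractions sum to 1.0.
- */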
diff --git a/storage/bdb/test/scr015/TestKeyRange.testin b/storage/bdb/test/scr015/TestKeyRange.testin
deleted file mode 100644
index a2b6bd74e7b..00000000000
--- a/storage/bdb/test/scr015/TestKeyRange.testin
+++ /dev/null
@@ -1,8 +0,0 @@
-first line is alphabetically somewhere in the middle.
-Blah blah
-let's have exactly eight lines of input.
-stuff
-more stuff
-and even more stuff
-lastly
-but not leastly.
diff --git a/storage/bdb/test/scr015/TestKeyRange.testout b/storage/bdb/test/scr015/TestKeyRange.testout
deleted file mode 100644
index 25b2e1a835c..00000000000
--- a/storage/bdb/test/scr015/TestKeyRange.testout
+++ /dev/null
@@ -1,19 +0,0 @@
-input>
-input>
-input>
-input>
-input>
-input>
-input>
-input>
-input>less: 0.375000
-equal: 0.125000
-greater: 0.500000
-Blah blah : halb halB
-and even more stuff : ffuts erom neve dna
-but not leastly. : .yltsael ton tub
-first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif
-lastly : yltsal
-let's have exactly eight lines of input. : .tupni fo senil thgie yltcaxe evah s'tel
-more stuff : ffuts erom
-stuff : ffuts
diff --git a/storage/bdb/test/scr015/TestLogc.cpp b/storage/bdb/test/scr015/TestLogc.cpp
deleted file mode 100644
index 636db4530c0..00000000000
--- a/storage/bdb/test/scr015/TestLogc.cpp
+++ /dev/null
@@ -1,101 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestLogc.cpp,v 1.8 2004/01/28 03:36:33 bostic Exp $
- */
-
-/*
- * A basic regression test for the Logc class.
- */
-
-#include <db_cxx.h>
-#include <iostream.h>
-
-static void show_dbt(ostream &os, Dbt *dbt)
-{
- int i;
- int size = dbt->get_size();
- unsigned char *data = (unsigned char *)dbt->get_data();
-
- os << "size: " << size << " data: ";
- for (i=0; i<size && i<10; i++) {
- os << (int)data[i] << " ";
- }
- if (i<size)
- os << "...";
-}
-
-int main(int argc, char *argv[])
-{
- try {
- DbEnv *env = new DbEnv(0);
- env->open(".", DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0);
-
- // Do some database activity to get something into the log.
- Db *db1 = new Db(env, 0);
- db1->open(NULL, "first.db", NULL, DB_BTREE, DB_CREATE, 0);
- Dbt *key = new Dbt((char *)"a", 1);
- Dbt *data = new Dbt((char *)"b", 1);
- db1->put(NULL, key, data, 0);
- key->set_data((char *)"c");
- data->set_data((char *)"d");
- db1->put(NULL, key, data, 0);
- db1->close(0);
-
- Db *db2 = new Db(env, 0);
- db2->open(NULL, "second.db", NULL, DB_BTREE, DB_CREATE, 0);
- key->set_data((char *)"w");
- data->set_data((char *)"x");
- db2->put(NULL, key, data, 0);
- key->set_data((char *)"y");
- data->set_data((char *)"z");
- db2->put(NULL, key, data, 0);
- db2->close(0);
-
- // Now get a log cursor and walk through.
- DbLogc *logc;
-
- env->log_cursor(&logc, 0);
- int ret = 0;
- DbLsn lsn;
- Dbt *dbt = new Dbt();
- u_int32_t flags = DB_FIRST;
-
- int count = 0;
- while ((ret = logc->get(&lsn, dbt, flags)) == 0) {
-
-		// We ignore the contents of the log record;
-		// they're not portable. Even the exact count
-		// may change when the underlying implementation
-		// changes, so we'll just make sure at the end
-		// that we saw 'enough'.
- //
- // cout << "logc.get: " << count;
- // show_dbt(cout, dbt);
- // cout << "\n";
- //
- count++;
- flags = DB_NEXT;
- }
- if (ret != DB_NOTFOUND) {
- cerr << "*** FAIL: logc.get returned: "
- << DbEnv::strerror(ret) << "\n";
- }
- logc->close(0);
-
-	// There have to be at *least* four log records,
- // since we did four separate database operations.
- //
- if (count < 4)
- cerr << "*** FAIL: not enough log records\n";
-
- cout << "TestLogc done.\n";
- }
- catch (DbException &dbe) {
- cerr << "*** FAIL: " << dbe.what() <<"\n";
- }
- return 0;
-}
diff --git a/storage/bdb/test/scr015/TestLogc.testout b/storage/bdb/test/scr015/TestLogc.testout
deleted file mode 100644
index afac3af7eda..00000000000
--- a/storage/bdb/test/scr015/TestLogc.testout
+++ /dev/null
@@ -1 +0,0 @@
-TestLogc done.
diff --git a/storage/bdb/test/scr015/TestSimpleAccess.cpp b/storage/bdb/test/scr015/TestSimpleAccess.cpp
deleted file mode 100644
index 8415cda78d9..00000000000
--- a/storage/bdb/test/scr015/TestSimpleAccess.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestSimpleAccess.cpp,v 1.7 2004/01/28 03:36:33 bostic Exp $
- */
-
-/*
- * Do some regression tests for simple access: put a single
- * key/data pair, then retrieve it with both a matching and a
- * non-matching key.
- */
-
-#include <db_cxx.h>
-#include <iostream.h>
-
-int main(int argc, char *argv[])
-{
- try {
- Db *db = new Db(NULL, 0);
- db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
-
- // populate our massive database.
- // all our strings include null for convenience.
- // Note we have to cast for idiomatic
- // usage, since newer gcc requires it.
- Dbt *keydbt = new Dbt((char *)"key", 4);
- Dbt *datadbt = new Dbt((char *)"data", 5);
- db->put(NULL, keydbt, datadbt, 0);
-
- // Now, retrieve. We could use keydbt over again,
- // but that wouldn't be typical in an application.
- Dbt *goodkeydbt = new Dbt((char *)"key", 4);
- Dbt *badkeydbt = new Dbt((char *)"badkey", 7);
- Dbt *resultdbt = new Dbt();
- resultdbt->set_flags(DB_DBT_MALLOC);
-
- int ret;
-
- if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
- cout << "get: " << DbEnv::strerror(ret) << "\n";
- }
- else {
- char *result = (char *)resultdbt->get_data();
- cout << "got data: " << result << "\n";
- }
-
- if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
- // We expect this...
- cout << "get using bad key: "
- << DbEnv::strerror(ret) << "\n";
- }
- else {
- char *result = (char *)resultdbt->get_data();
- cout << "*** got data using bad key!!: "
- << result << "\n";
- }
- cout << "finished test\n";
- }
- catch (DbException &dbe) {
- cerr << "Db Exception: " << dbe.what();
- }
- return 0;
-}
diff --git a/storage/bdb/test/scr015/TestSimpleAccess.testout b/storage/bdb/test/scr015/TestSimpleAccess.testout
deleted file mode 100644
index dc88d4788e4..00000000000
--- a/storage/bdb/test/scr015/TestSimpleAccess.testout
+++ /dev/null
@@ -1,3 +0,0 @@
-got data: data
-get using bad key: DB_NOTFOUND: No matching key/data pair found
-finished test
diff --git a/storage/bdb/test/scr015/TestTruncate.cpp b/storage/bdb/test/scr015/TestTruncate.cpp
deleted file mode 100644
index 54ecf81c8ef..00000000000
--- a/storage/bdb/test/scr015/TestTruncate.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestTruncate.cpp,v 1.7 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Do some regression tests for Db.truncate: put a key/data pair,
- * truncate the database, and verify the pair is really gone.
- */
-
-#include <db_cxx.h>
-#include <iostream.h>
-
-int main(int argc, char *argv[])
-{
- try {
- Db *db = new Db(NULL, 0);
- db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
-
- // populate our massive database.
- // all our strings include null for convenience.
- // Note we have to cast for idiomatic
- // usage, since newer gcc requires it.
- Dbt *keydbt = new Dbt((char*)"key", 4);
- Dbt *datadbt = new Dbt((char*)"data", 5);
- db->put(NULL, keydbt, datadbt, 0);
-
- // Now, retrieve. We could use keydbt over again,
- // but that wouldn't be typical in an application.
- Dbt *goodkeydbt = new Dbt((char*)"key", 4);
- Dbt *badkeydbt = new Dbt((char*)"badkey", 7);
- Dbt *resultdbt = new Dbt();
- resultdbt->set_flags(DB_DBT_MALLOC);
-
- int ret;
-
- if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
- cout << "get: " << DbEnv::strerror(ret) << "\n";
- }
- else {
- char *result = (char *)resultdbt->get_data();
- cout << "got data: " << result << "\n";
- }
-
- if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
- // We expect this...
- cout << "get using bad key: "
- << DbEnv::strerror(ret) << "\n";
- }
- else {
- char *result = (char *)resultdbt->get_data();
- cout << "*** got data using bad key!!: "
- << result << "\n";
- }
-
- // Now, truncate and make sure that it's really gone.
- cout << "truncating data...\n";
- u_int32_t nrecords;
- db->truncate(NULL, &nrecords, 0);
- cout << "truncate returns " << nrecords << "\n";
- if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
- // We expect this...
- cout << "after truncate get: "
- << DbEnv::strerror(ret) << "\n";
- }
- else {
- char *result = (char *)resultdbt->get_data();
- cout << "got data: " << result << "\n";
- }
-
- db->close(0);
- cout << "finished test\n";
- }
- catch (DbException &dbe) {
- cerr << "Db Exception: " << dbe.what();
- }
- return 0;
-}
diff --git a/storage/bdb/test/scr015/TestTruncate.testout b/storage/bdb/test/scr015/TestTruncate.testout
deleted file mode 100644
index 0a4bc98165d..00000000000
--- a/storage/bdb/test/scr015/TestTruncate.testout
+++ /dev/null
@@ -1,6 +0,0 @@
-got data: data
-get using bad key: DB_NOTFOUND: No matching key/data pair found
-truncating data...
-truncate returns 1
-after truncate get: DB_NOTFOUND: No matching key/data pair found
-finished test
diff --git a/storage/bdb/test/scr015/chk.cxxtests b/storage/bdb/test/scr015/chk.cxxtests
deleted file mode 100644
index 3d1e3947c4c..00000000000
--- a/storage/bdb/test/scr015/chk.cxxtests
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.cxxtests,v 1.8 2004/09/28 19:58:42 mjc Exp $
-#
-# Check to make sure that regression tests for C++ run.
-
-TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory
-
-# All paths must be relative to a subdirectory of the build directory
-LIBS="-L.. -ldb_cxx"
-CXXFLAGS="-I.. -I../../dbinc"
-
-[ `uname` = "Linux" ] && LIBS="$LIBS -lpthread"
-
-# Test must be run from a local build directory, not from a test
-# directory.
-cd ..
-[ -f db_config.h ] || {
- echo 'FAIL: chk.cxxtests must be run from a local build directory.'
- exit 1
-}
-[ -d ../env ] || {
- echo 'FAIL: chk.cxxtests must be run from a local build directory.'
- exit 1
-}
-[ -f libdb.a ] || make libdb.a || {
- echo 'FAIL: unable to build libdb.a'
- exit 1
-}
-[ -f libdb_cxx.a ] || make libdb_cxx.a || {
- echo 'FAIL: unable to build libdb_cxx.a'
- exit 1
-}
-CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile`
-echo " ====== cxx tests using $CXX"
-testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
-
-for testname in $testnames; do
- if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
- echo " **** cxx test $testname ignored"
- continue
- fi
-
- echo " ==== cxx test $testname"
- rm -rf TESTCXX; mkdir TESTCXX
- cd ./TESTCXX
- testprefix=../$TEST_CXX_SRCDIR/$testname
-
- ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
- echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
- exit 1
- }
- rm -f ../$testname.compileout
- infile=$testprefix.testin
- [ -f $infile ] || infile=/dev/null
- goodoutfile=$testprefix.testout
- [ -f $goodoutfile ] || goodoutfile=/dev/null
- gooderrfile=$testprefix.testerr
- [ -f $gooderrfile ] || gooderrfile=/dev/null
- ./$testname <$infile >../$testname.out 2>../$testname.err
- cmp ../$testname.out $goodoutfile > /dev/null || {
- echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
- exit 1
- }
- cmp ../$testname.err $gooderrfile > /dev/null || {
- echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
- exit 1
- }
- cd ..
- rm -f $testname.err $testname.out
-done
-rm -rf TESTCXX
-exit 0
diff --git a/storage/bdb/test/scr015/ignore b/storage/bdb/test/scr015/ignore
deleted file mode 100644
index 55ce82ae372..00000000000
--- a/storage/bdb/test/scr015/ignore
+++ /dev/null
@@ -1,4 +0,0 @@
-#
-# $Id: ignore,v 1.3 2001/10/12 13:02:32 dda Exp $
-#
-# A list of tests to ignore
diff --git a/storage/bdb/test/scr015/testall b/storage/bdb/test/scr015/testall
deleted file mode 100644
index a2d493a8b22..00000000000
--- a/storage/bdb/test/scr015/testall
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh -
-# $Id: testall,v 1.3 2001/09/13 14:49:36 dda Exp $
-#
-# Run all the C++ regression tests
-
-ecode=0
-prefixarg=""
-stdinarg=""
-while :
-do
- case "$1" in
- --prefix=* )
- prefixarg="$1"; shift;;
- --stdin )
- stdinarg="$1"; shift;;
- * )
- break
- esac
-done
-files="`find . -name \*.cpp -print`"
-for file in $files; do
- name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'`
- if grep $name ignore > /dev/null; then
- echo " **** cxx test $name ignored"
- else
- echo " ==== cxx test $name"
- if ! sh ./testone $prefixarg $stdinarg $name; then
- ecode=1
- fi
- fi
-done
-exit $ecode
diff --git a/storage/bdb/test/scr015/testone b/storage/bdb/test/scr015/testone
deleted file mode 100644
index 3bbba3f90f0..00000000000
--- a/storage/bdb/test/scr015/testone
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/bin/sh -
-# $Id: testone,v 1.5 2002/07/05 22:17:59 dda Exp $
-#
-# Run just one C++ regression test; the single argument
-# is the basename of the test, e.g. TestRpcServer
-
-error()
-{
- echo '' >&2
- echo "C++ regression error: $@" >&2
- echo '' >&2
- ecode=1
-}
-
-# compares the result against the good version,
-# reports differences, and removes the result file
-# if there are no differences.
-#
-compare_result()
-{
- good="$1"
- latest="$2"
- if [ ! -e "$good" ]; then
- echo "Note: $good does not exist"
- return
- fi
- tmpout=/tmp/blddb$$.tmp
- diff "$good" "$latest" > $tmpout
- if [ -s $tmpout ]; then
- nbad=`grep '^[0-9]' $tmpout | wc -l`
- error "$good and $latest differ in $nbad places."
- else
- rm $latest
- fi
- rm -f $tmpout
-}
-
-ecode=0
-stdinflag=n
-gdbflag=n
-CXX=${CXX:-c++}
-LIBS=${LIBS:-}
-
-# remove any -c option in the CXXFLAGS
-CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`"
-
-# determine the prefix of the install tree
-prefix=""
-while :
-do
- case "$1" in
- --prefix=* )
- prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
- LIBS="-L$prefix/lib -ldb_cxx $LIBS"
- CXXFLAGS="-I$prefix/include $CXXFLAGS"
- export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
- ;;
- --stdin )
- stdinflag=y; shift
- ;;
- --gdb )
- CXXFLAGS="-g $CXXFLAGS"
- gdbflag=y; shift
- ;;
- * )
- break
- ;;
- esac
-done
-
-if [ "$#" = 0 ]; then
- echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
- exit 1
-fi
-name="$1"
-
-# compile
-rm -rf TESTDIR; mkdir TESTDIR
-cd ./TESTDIR
-
-${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1
-if [ $? != 0 -o -s ../$name.compileout ]; then
- error "compilation of $name failed, see $name.compileout"
- exit 1
-fi
-rm -f ../$name.compileout
-
-# find input and error file
-infile=../$name.testin
-if [ ! -f $infile ]; then
- infile=/dev/null
-fi
-
-# run and diff results
-rm -rf TESTDIR
-if [ "$gdbflag" = y ]; then
- if [ -s $infile ]; then
- echo "Input file is $infile"
- fi
- gdb ./$name
- exit 0
-elif [ "$stdinflag" = y ]; then
- ./$name >../$name.out 2>../$name.err
-else
- ./$name <$infile >../$name.out 2>../$name.err
-fi
-cd ..
-
-testerr=$name.testerr
-if [ ! -f $testerr ]; then
- testerr=/dev/null
-fi
-
-testout=$name.testout
-if [ ! -f $testout ]; then
- testout=/dev/null
-fi
-
-compare_result $testout $name.out
-compare_result $testerr $name.err
-rm -rf TESTDIR
-exit $ecode
diff --git a/storage/bdb/test/scr016/CallbackTest.java b/storage/bdb/test/scr016/CallbackTest.java
deleted file mode 100644
index 14abcf44f18..00000000000
--- a/storage/bdb/test/scr016/CallbackTest.java
+++ /dev/null
@@ -1,83 +0,0 @@
-package com.sleepycat.test;
-import com.sleepycat.db.*;
-
-public class CallbackTest
-{
- public static void main(String args[])
- {
- try {
- Db db = new Db(null, 0);
- db.setBtreeCompare(new BtreeCompare());
- db.open(null, "test.db", "", Db.DB_BTREE, Db.DB_CREATE, 0666);
- StringDbt[] keys = new StringDbt[10];
- StringDbt[] datas = new StringDbt[10];
- for (int i = 0; i<10; i++) {
- int val = (i * 3) % 10;
- keys[i] = new StringDbt("key" + val);
- datas[i] = new StringDbt("data" + val);
- System.out.println("put " + val);
- db.put(null, keys[i], datas[i], 0);
- }
- }
- catch (DbException dbe) {
- System.err.println("FAIL: " + dbe);
- }
- catch (java.io.FileNotFoundException fnfe) {
- System.err.println("FAIL: " + fnfe);
- }
-
- }
-
-
-}
-
-class BtreeCompare
- implements DbBtreeCompare
-{
- /* A weird comparator, for example.
-     * In fact, it may not be legal, since it does not define a consistent ordering.
- */
- public int compare(Db db, Dbt dbt1, Dbt dbt2)
- {
- System.out.println("compare function called");
- byte b1[] = dbt1.getData();
- byte b2[] = dbt2.getData();
- System.out.println(" " + (new String(b1)) + ", " + (new String(b2)));
- int len1 = b1.length;
- int len2 = b2.length;
- if (len1 != len2)
- return (len1 < len2) ? 1 : -1;
- int value = 1;
- for (int i=0; i<len1; i++) {
- if (b1[i] != b2[i])
- return (b1[i] < b2[i]) ? value : -value;
- value *= -1;
- }
- return 0;
- }
-}
-
-class StringDbt extends Dbt
-{
- StringDbt()
- {
- setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- StringDbt(String value)
- {
- setString(value);
- setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- void setString(String value)
- {
- setData(value.getBytes());
- setSize(value.length());
- }
-
- String getString()
- {
- return new String(getData(), 0, getSize());
- }
-}
diff --git a/storage/bdb/test/scr016/CallbackTest.testout b/storage/bdb/test/scr016/CallbackTest.testout
deleted file mode 100644
index 447888285ab..00000000000
--- a/storage/bdb/test/scr016/CallbackTest.testout
+++ /dev/null
@@ -1,54 +0,0 @@
-put 0
-put 3
-compare function called
- key3, key0
-put 6
-compare function called
- key6, key3
-put 9
-compare function called
- key9, key3
-compare function called
- key9, key6
-put 2
-compare function called
- key2, key6
-compare function called
- key2, key3
-compare function called
- key2, key0
-put 5
-compare function called
- key5, key3
-compare function called
- key5, key9
-compare function called
- key5, key6
-put 8
-compare function called
- key8, key5
-compare function called
- key8, key9
-compare function called
- key8, key6
-put 1
-compare function called
- key1, key5
-compare function called
- key1, key2
-compare function called
- key1, key0
-put 4
-compare function called
- key4, key5
-compare function called
- key4, key2
-compare function called
- key4, key3
-put 7
-compare function called
- key7, key4
-compare function called
- key7, key8
-compare function called
- key7, key6
diff --git a/storage/bdb/test/scr016/README b/storage/bdb/test/scr016/README
deleted file mode 100644
index 226a8aa3b77..00000000000
--- a/storage/bdb/test/scr016/README
+++ /dev/null
@@ -1,37 +0,0 @@
-# $Id: README,v 1.2 2001/05/31 23:09:10 dda Exp $
-
-Use the scripts testall or testone to run all, or just one of the Java
-tests. You must be in this directory to run them. For example,
-
- $ export LD_LIBRARY_PATH=/usr/local/Berkeley3.3/lib
- $ ./testone TestAppendRecno
- $ ./testall
-
-The scripts will use the javac and java in your path. Set the
-environment variables $JAVAC and $JAVA to override this. They will
-also honor
-any $CLASSPATH that is already set, prepending ../../../../classes to
-it, which is where the test .class files are put, and where the DB
-.class files can normally be found after a build on Unix and Windows.
-If none of these variables are set, everything will probably work
-with whatever java/javac is in your path.
-
-To run successfully, you will probably need to set $LD_LIBRARY_PATH
-to be the directory containing libdb_java-X.Y.so.
-
-As an alternative, use the --prefix=<DIR> option, a la configure,
-to set the top of the BerkeleyDB install directory. This forces
-the proper options to be added to $LD_LIBRARY_PATH.
-For example,
-
- $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
- $ ./testall --prefix=/usr/include/BerkeleyDB
-
-The test framework is pretty simple. Any <name>.java file in this
-directory that is not mentioned in the 'ignore' file represents a
-test. If the test does not compile successfully, the compiler output
-is left in <name>.compileout. Otherwise, the java program is run in
-a clean subdirectory using <name>.testin as input, or /dev/null if
-that doesn't exist. Output and error from the test run are put into
-<name>.out and <name>.err. If <name>.testout and <name>.testerr
-exist, they are used as reference files and any differences are
-reported. If either of the reference files does not exist, /dev/null
-is used.
diff --git a/storage/bdb/test/scr016/TestAppendRecno.java b/storage/bdb/test/scr016/TestAppendRecno.java
deleted file mode 100644
index 4237b99db7d..00000000000
--- a/storage/bdb/test/scr016/TestAppendRecno.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestAppendRecno.java,v 1.6 2004/01/28 03:36:34 bostic Exp $
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.InputStreamReader;
-import java.io.IOException;
-import java.io.PrintStream;
-
-public class TestAppendRecno
- implements DbAppendRecno
-{
- private static final String FileName = "access.db";
- int callback_count = 0;
- Db table = null;
-
- public TestAppendRecno()
- {
- }
-
- private static void usage()
- {
- System.err.println("usage: TestAppendRecno\n");
- System.exit(1);
- }
-
- public static void main(String argv[])
- {
- try
- {
- TestAppendRecno app = new TestAppendRecno();
- app.run();
- }
- catch (DbException dbe)
- {
- System.err.println("TestAppendRecno: " + dbe.toString());
- System.exit(1);
- }
- catch (FileNotFoundException fnfe)
- {
- System.err.println("TestAppendRecno: " + fnfe.toString());
- System.exit(1);
- }
- System.exit(0);
- }
-
- public void run()
- throws DbException, FileNotFoundException
- {
- // Remove the previous database.
- new File(FileName).delete();
-
- // Create the database object.
- // There is no environment for this simple example.
- table = new Db(null, 0);
- table.set_error_stream(System.err);
- table.set_errpfx("TestAppendRecno");
- table.set_append_recno(this);
-
- table.open(null, FileName, null, Db.DB_RECNO, Db.DB_CREATE, 0644);
- for (int i=0; i<10; i++) {
- System.out.println("\n*** Iteration " + i );
- try {
- RecnoDbt key = new RecnoDbt(77+i);
- StringDbt data = new StringDbt("data" + i + "_xyz");
- table.put(null, key, data, Db.DB_APPEND);
- }
- catch (DbException dbe) {
- System.out.println("dbe: " + dbe);
- }
- }
-
- // Acquire an iterator for the table.
- Dbc iterator;
- iterator = table.cursor(null, 0);
-
- // Walk through the table, printing the key/data pairs.
- // See class StringDbt defined below.
- //
- RecnoDbt key = new RecnoDbt();
- StringDbt data = new StringDbt();
- while (iterator.get(key, data, Db.DB_NEXT) == 0)
- {
- System.out.println(key.getRecno() + " : " + data.getString());
- }
- iterator.close();
- table.close(0);
- System.out.println("Test finished.");
- }
-
- public void db_append_recno(Db db, Dbt dbt, int recno)
- throws DbException
- {
- int count = callback_count++;
-
- System.out.println("====\ncallback #" + count);
- System.out.println("db is table: " + (db == table));
- System.out.println("recno = " + recno);
-
- // This gives variable output.
- //System.out.println("dbt = " + dbt);
- if (dbt instanceof RecnoDbt) {
- System.out.println("dbt = " +
- ((RecnoDbt)dbt).getRecno());
- }
- else if (dbt instanceof StringDbt) {
- System.out.println("dbt = " +
- ((StringDbt)dbt).getString());
- }
- else {
- // Note: the dbts are created out of whole
- // cloth by Berkeley DB, not us!
- System.out.println("internally created dbt: " +
- new StringDbt(dbt) + ", size " +
- dbt.get_size());
- }
-
- switch (count) {
- case 0:
- // nothing
- break;
-
- case 1:
- dbt.set_size(dbt.get_size() - 1);
- break;
-
- case 2:
- System.out.println("throwing...");
- throw new DbException("append_recno thrown");
- //not reached
-
- case 3:
- // Should result in an error (size unchanged).
- dbt.set_offset(1);
- break;
-
- case 4:
- dbt.set_offset(1);
- dbt.set_size(dbt.get_size() - 1);
- break;
-
- case 5:
- dbt.set_offset(1);
- dbt.set_size(dbt.get_size() - 2);
- break;
-
- case 6:
- dbt.set_data(new String("abc").getBytes());
- dbt.set_size(3);
- break;
-
- case 7:
- // Should result in an error.
- dbt.set_data(null);
- break;
-
- case 8:
- // Should result in an error.
- dbt.set_data(new String("abc").getBytes());
- dbt.set_size(4);
- break;
-
- default:
- break;
- }
- }
-
-
- // Here's an example of how you can extend a Dbt to store recno's.
- //
- static /*inner*/
- class RecnoDbt extends Dbt
- {
- RecnoDbt()
- {
- this(0); // let other constructor do most of the work
- }
-
- RecnoDbt(int value)
- {
- set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
- arr = new byte[4];
- set_data(arr); // use our local array for data
- set_ulen(4); // size of return storage
- setRecno(value);
- }
-
- public String toString() /*override*/
- {
- return String.valueOf(getRecno());
- }
-
- void setRecno(int value)
- {
- set_recno_key_data(value);
- set_size(arr.length);
- }
-
- int getRecno()
- {
- return get_recno_key_data();
- }
-
- byte arr[];
- }
-
- // Here's an example of how you can extend a Dbt in a straightforward
- // way to allow easy storage/retrieval of strings, or whatever
- // kind of data you wish. We've declared it as a static inner
- // class, but it need not be.
- //
- static /*inner*/
- class StringDbt extends Dbt
- {
- StringDbt(Dbt dbt)
- {
- set_data(dbt.get_data());
- set_size(dbt.get_size());
- }
-
- StringDbt()
- {
- set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- StringDbt(String value)
- {
- setString(value);
- set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- void setString(String value)
- {
- set_data(value.getBytes());
- set_size(value.length());
- }
-
- String getString()
- {
- return new String(get_data(), 0, get_size());
- }
-
- public String toString() /*override*/
- {
- return getString();
- }
- }
-}
-
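-/*
- * How the callback cases map to the final records (see
- * TestAppendRecno.testout): case 0 stores data0_xyz unchanged; case
- * 1 trims one byte (data1_xy); case 2 throws, so nothing is stored
- * and recno 3 is reused; cases 3, 7 and 8 are rejected as errors;
- * cases 4 and 5 shift the data by an offset of one (ata4_xyz,
- * ata5_xy); case 6 replaces the data entirely (abc); the default
- * case stores data9_xyz unchanged.
- */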
diff --git a/storage/bdb/test/scr016/TestAppendRecno.testout b/storage/bdb/test/scr016/TestAppendRecno.testout
deleted file mode 100644
index 970174e7a96..00000000000
--- a/storage/bdb/test/scr016/TestAppendRecno.testout
+++ /dev/null
@@ -1,82 +0,0 @@
-
-*** Iteration 0
-====
-callback #0
-db is table: true
-recno = 1
-internally created dbt: data0_xyz, size 9
-
-*** Iteration 1
-====
-callback #1
-db is table: true
-recno = 2
-internally created dbt: data1_xyz, size 9
-
-*** Iteration 2
-====
-callback #2
-db is table: true
-recno = 3
-internally created dbt: data2_xyz, size 9
-throwing...
-dbe: com.sleepycat.db.DbException: append_recno thrown
-
-*** Iteration 3
-====
-callback #3
-db is table: true
-recno = 3
-internally created dbt: data3_xyz, size 9
-dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
-
-*** Iteration 4
-====
-callback #4
-db is table: true
-recno = 3
-internally created dbt: data4_xyz, size 9
-
-*** Iteration 5
-====
-callback #5
-db is table: true
-recno = 4
-internally created dbt: data5_xyz, size 9
-
-*** Iteration 6
-====
-callback #6
-db is table: true
-recno = 5
-internally created dbt: data6_xyz, size 9
-
-*** Iteration 7
-====
-callback #7
-db is table: true
-recno = 6
-internally created dbt: data7_xyz, size 9
-dbe: com.sleepycat.db.DbException: Dbt.data is null
-
-*** Iteration 8
-====
-callback #8
-db is table: true
-recno = 6
-internally created dbt: data8_xyz, size 9
-dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
-
-*** Iteration 9
-====
-callback #9
-db is table: true
-recno = 6
-internally created dbt: data9_xyz, size 9
-1 : data0_xyz
-2 : data1_xy
-3 : ata4_xyz
-4 : ata5_xy
-5 : abc
-6 : data9_xyz
-Test finished.
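The trimmed values in the final dump follow directly from the Dbt adjustments in the callback switch above: iteration 4 sets offset 1 and shrinks the size by one, so "data4_xyz" is stored as the eight bytes "ata4_xyz"; iteration 5 sets offset 1 and shrinks by two, giving "ata5_xy"; iteration 6 replaces the data outright with "abc". The iterations whose adjustments were invalid (3, 7, and 8) raise the DbException lines shown and store nothing.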
diff --git a/storage/bdb/test/scr016/TestAssociate.java b/storage/bdb/test/scr016/TestAssociate.java
deleted file mode 100644
index 80451d1e66b..00000000000
--- a/storage/bdb/test/scr016/TestAssociate.java
+++ /dev/null
@@ -1,333 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestAssociate.java,v 1.8 2004/01/28 03:36:34 bostic Exp $
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.Reader;
-import java.io.StringReader;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Hashtable;
-
-public class TestAssociate
- implements DbDupCompare
-{
- private static final String FileName = "access.db";
- public static Db saveddb1 = null;
- public static Db saveddb2 = null;
-
- public TestAssociate()
- {
- }
-
- private static void usage()
- {
- System.err.println("usage: TestAssociate\n");
- System.exit(1);
- }
-
- public static void main(String argv[])
- {
- try
- {
- TestAssociate app = new TestAssociate();
- app.run();
- }
- catch (DbException dbe)
- {
- System.err.println("TestAssociate: " + dbe.toString());
- System.exit(1);
- }
- catch (FileNotFoundException fnfe)
- {
- System.err.println("TestAssociate: " + fnfe.toString());
- System.exit(1);
- }
- System.exit(0);
- }
-
- public static int counter = 0;
- public static String results[] = { "abc", "def", "ghi", "JKL", "MNO", null };
-
- // Prompts for a line, and keeps prompting until a non-blank
- // line is returned. Returns null on error.
- //
- static public String askForLine(Reader reader,
- PrintStream out, String prompt)
- {
- /*
- String result = "";
- while (result != null && result.length() == 0) {
- out.print(prompt);
- out.flush();
- result = getLine(reader);
- }
- return result;
- */
- return results[counter++];
- }
-
- // Not terribly efficient, but does the job.
- // Works for reading a line from stdin or a file.
- // Returns null on EOF. If EOF appears in the middle
- // of a line, returns that line, then null on next call.
- //
- static public String getLine(Reader reader)
- {
- StringBuffer b = new StringBuffer();
- int c;
- try {
- while ((c = reader.read()) != -1 && c != '\n') {
- if (c != '\r')
- b.append((char)c);
- }
- }
- catch (IOException ioe) {
- c = -1;
- }
-
- if (c == -1 && b.length() == 0)
- return null;
- else
- return b.toString();
- }
-
- static public String shownull(Object o)
- {
- if (o == null)
- return "null";
- else
- return "not null";
- }
-
- public void run()
- throws DbException, FileNotFoundException
- {
- // Remove the previous database.
- new File(FileName).delete();
-
- // Create the database object.
- // There is no environment for this simple example.
- DbEnv dbenv = new DbEnv(0);
- dbenv.open("./", Db.DB_CREATE|Db.DB_INIT_MPOOL, 0644);
- (new java.io.File(FileName)).delete();
- Db table = new Db(dbenv, 0);
- Db table2 = new Db(dbenv, 0);
- table2.set_dup_compare(this);
- table2.set_flags(Db.DB_DUPSORT);
- table.set_error_stream(System.err);
- table2.set_error_stream(System.err);
- table.set_errpfx("TestAssociate");
- table2.set_errpfx("TestAssociate(table2)");
- System.out.println("Primary database is " + shownull(table));
- System.out.println("Secondary database is " + shownull(table2));
- saveddb1 = table;
- saveddb2 = table2;
- table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
- table2.open(null, FileName + "2", null,
- Db.DB_BTREE, Db.DB_CREATE, 0644);
- table.associate(null, table2, new Capitalize(), 0);
-
- //
- // Insert records into the database, where the key is the user
- // input and the data is the user input in reverse order.
- //
- Reader reader = new StringReader("abc\ndef\nghi");
-
- for (;;) {
- String line = askForLine(reader, System.out, "input> ");
- if (line == null)
- break;
-
- String reversed = (new StringBuffer(line)).reverse().toString();
-
- // See definition of StringDbt below
- //
- StringDbt key = new StringDbt(line);
- StringDbt data = new StringDbt(reversed);
-
- try
- {
- int err;
- if ((err = table.put(null,
- key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
- System.out.println("Key " + line + " already exists.");
- }
- }
- catch (DbException dbe)
- {
- System.out.println(dbe.toString());
- }
- System.out.println("");
- }
-
- // Acquire an iterator for the table.
- Dbc iterator;
- iterator = table2.cursor(null, 0);
-
- // Walk through the table, printing the key/data pairs.
- // See class StringDbt defined below.
- //
- StringDbt key = new StringDbt();
- StringDbt data = new StringDbt();
- StringDbt pkey = new StringDbt();
-
- while (iterator.get(key, data, Db.DB_NEXT) == 0)
- {
- System.out.println(key.getString() + " : " + data.getString());
- }
-
- key.setString("BC");
- System.out.println("get BC returns " + table2.get(null, key, data, 0));
- System.out.println(" values: " + key.getString() + " : " + data.getString());
- System.out.println("pget BC returns " + table2.pget(null, key, pkey, data, 0));
- System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
- key.setString("KL");
- System.out.println("get KL returns " + table2.get(null, key, data, 0));
- System.out.println(" values: " + key.getString() + " : " + data.getString());
- System.out.println("pget KL returns " + table2.pget(null, key, pkey, data, 0));
- System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
-
- iterator.close();
- table.close(0);
- table2.close(0);
- dbenv.close(0);
- }
-
- // Here's an example of how you can extend a Dbt in a straightforward
- // way to allow easy storage/retrieval of strings, or whatever
- // kind of data you wish. We've declared it as a static inner
- // class, but it need not be.
- //
- static /*inner*/
- class StringDbt extends Dbt
- {
- StringDbt()
- {
- set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- StringDbt(String value)
- {
- setString(value);
- set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- void setString(String value)
- {
- set_data(value.getBytes());
- set_size(value.length());
- }
-
- String getString()
- {
- return new String(get_data(), 0, get_size());
- }
-
- public String toString()
- {
- return "StringDbt=" + getString();
- }
- }
-
- /* Creates a deliberately contrived secondary index as follows:
- for an N-letter key, we use the N-1 letters starting at
- position 1. If those letters are already capitalized,
- we return the old array, but with the offset set to 1.
- If they are not capitalized, we create a new,
- capitalized array. This is useless for a real
- application, but it exercises all the paths in the runtime.
- */
- public static class Capitalize implements DbSecondaryKeyCreate
- {
- public int secondaryKeyCreate(Db secondary, Dbt key, Dbt value,
- Dbt result)
- throws DbException
- {
- String which = "unknown db";
- if (saveddb1.equals(secondary)) {
- which = "primary";
- }
- else if (saveddb2.equals(secondary)) {
- which = "secondary";
- }
- System.out.println("secondaryKeyCreate, Db: " + shownull(secondary) + "(" + which + "), key: " + show_dbt(key) + ", data: " + show_dbt(value));
- int len = key.get_size();
- byte[] arr = key.get_data();
- boolean capped = true;
-
- if (len < 1)
- throw new DbException("bad key");
-
- if (len < 2)
- return Db.DB_DONOTINDEX;
-
- result.set_size(len - 1);
- for (int i=1; capped && i<len; i++) {
- if (!Character.isUpperCase((char)arr[i]))
- capped = false;
- }
- if (capped) {
- System.out.println(" creating key(1): " + new String(arr, 1, len-1));
- result.set_data(arr);
- result.set_offset(1);
- }
- else {
- System.out.println(" creating key(2): " + (new String(arr)).substring(1).
- toUpperCase());
- result.set_data((new String(arr)).substring(1).
- toUpperCase().getBytes());
- }
- return 0;
- }
- }
-
- public int compareDuplicates(Db db, Dbt dbt1, Dbt dbt2)
- {
- System.out.println("compare");
- int sz1 = dbt1.get_size();
- int sz2 = dbt2.get_size();
- if (sz1 < sz2)
- return -1;
- if (sz1 > sz2)
- return 1;
- byte[] data1 = dbt1.get_data();
- byte[] data2 = dbt2.get_data();
- for (int i=0; i<sz1; i++)
- if (data1[i] != data2[i])
- return (data1[i] < data2[i] ? -1 : 1);
- return 0;
- }
-
- public static int nseen = 0;
- public static Hashtable ht = new Hashtable();
-
- public static String show_dbt(Dbt dbt)
- {
- String name;
-
- if (dbt == null)
- return "null dbt";
-
- name = (String)ht.get(dbt);
- if (name == null) {
- name = "Dbt" + (nseen++);
- ht.put(dbt, name);
- }
-
- byte[] value = dbt.get_data();
- if (value == null)
- return name + "(null)";
- else
- return name + "(\"" + new String(value) + "\")";
- }
-}
-
-
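The Capitalize callback above is the whole of the secondary-index plumbing: BDB calls it for every primary record and indexes whatever it leaves in result. A stripped-down, hypothetical variant that indexes records by the upper-cased first byte of the key (the class name is invented; only calls that appear in this file are used):

import com.sleepycat.db.*;

// Illustrative sketch: derive a one-byte secondary key from the primary key.
class FirstByteUpper implements DbSecondaryKeyCreate
{
    public int secondaryKeyCreate(Db secondary, Dbt key, Dbt value, Dbt result)
        throws DbException
    {
        if (key.get_size() < 1)
            return Db.DB_DONOTINDEX;   // nothing to index
        byte[] arr = key.get_data();
        byte[] one = { (byte)Character.toUpperCase((char)arr[0]) };
        result.set_data(one);
        result.set_size(1);
        return 0;
    }
}

Wiring it up is one call, primary.associate(null, secondary, new FirstByteUpper(), 0), after which get on the secondary finds records by the derived key and pget additionally returns the primary key, as the expected output below shows.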
diff --git a/storage/bdb/test/scr016/TestAssociate.testout b/storage/bdb/test/scr016/TestAssociate.testout
deleted file mode 100644
index 34414b660d1..00000000000
--- a/storage/bdb/test/scr016/TestAssociate.testout
+++ /dev/null
@@ -1,30 +0,0 @@
-Primary database is not null
-Secondary database is not null
-secondary_key_create, Db: not null(secondary), key: Dbt0("abc"), data: Dbt1("cba")
- creating key(2): BC
-
-secondary_key_create, Db: not null(secondary), key: Dbt2("def"), data: Dbt3("fed")
- creating key(2): EF
-
-secondary_key_create, Db: not null(secondary), key: Dbt4("ghi"), data: Dbt5("ihg")
- creating key(2): HI
-
-secondary_key_create, Db: not null(secondary), key: Dbt6("JKL"), data: Dbt7("LKJ")
- creating key(1): KL
-
-secondary_key_create, Db: not null(secondary), key: Dbt8("MNO"), data: Dbt9("ONM")
- creating key(1): NO
-
-BC : cba
-EF : fed
-HI : ihg
-KL : LKJ
-NO : ONM
-get BC returns 0
- values: BC : cba
-pget BC returns 0
- values: BC : abc : cba
-get KL returns 0
- values: KL : LKJ
-pget KL returns 0
- values: KL : JKL : LKJ
diff --git a/storage/bdb/test/scr016/TestClosedDb.java b/storage/bdb/test/scr016/TestClosedDb.java
deleted file mode 100644
index 50ccdcc8800..00000000000
--- a/storage/bdb/test/scr016/TestClosedDb.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestClosedDb.java,v 1.8 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Close the Db, and make sure operations after that fail gracefully.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestClosedDb
-{
- public static void main(String[] args)
- {
- try {
- Db db = new Db(null, 0);
- db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
-
- // populate our massive database.
- Dbt keydbt = new Dbt("key".getBytes());
- Dbt datadbt = new Dbt("data".getBytes());
- db.put(null, keydbt, datadbt, 0);
-
- // Now, retrieve. We could use keydbt over again,
- // but that wouldn't be typical in an application.
- Dbt goodkeydbt = new Dbt("key".getBytes());
- Dbt badkeydbt = new Dbt("badkey".getBytes());
- Dbt resultdbt = new Dbt();
- resultdbt.setFlags(Db.DB_DBT_MALLOC);
-
- int ret;
-
- // Close the db - subsequent operations should fail
- // by throwing an exception.
- db.close(0);
- try {
- db.get(null, goodkeydbt, resultdbt, 0);
- System.out.println("Error - did not expect to get this far.");
- }
- catch (IllegalArgumentException iae) {
- System.out.println("Got expected exception: " + iae);
- }
- System.out.println("finished test");
- }
- catch (DbException dbe) {
- System.err.println("Db Exception: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- System.err.println("FileNotFoundException: " + fnfe);
- }
-
- }
-
-}
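As the expected output below shows, BDB signals use of a closed handle with an IllegalArgumentException. An application that wants a cheaper, explicit check can track the handle state itself; a minimal sketch, with the SafeDb class invented for illustration:

import com.sleepycat.db.*;

// Illustrative sketch: track closed state instead of catching the
// IllegalArgumentException a closed handle would throw.
class SafeDb
{
    private final Db db;
    private boolean closed = false;

    SafeDb(Db db) { this.db = db; }

    int get(Dbt key, Dbt data) throws DbException
    {
        if (closed)
            throw new IllegalStateException("Db already closed");
        return db.get(null, key, data, 0);
    }

    void close() throws DbException
    {
        if (!closed) {   // make close idempotent
            closed = true;
            db.close(0);
        }
    }
}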
diff --git a/storage/bdb/test/scr016/TestClosedDb.testout b/storage/bdb/test/scr016/TestClosedDb.testout
deleted file mode 100644
index ffa53713635..00000000000
--- a/storage/bdb/test/scr016/TestClosedDb.testout
+++ /dev/null
@@ -1,2 +0,0 @@
-Got expected exception: java.lang.IllegalArgumentException: call on closed handle
-finished test
diff --git a/storage/bdb/test/scr016/TestConstruct01.java b/storage/bdb/test/scr016/TestConstruct01.java
deleted file mode 100644
index 6309409c7b4..00000000000
--- a/storage/bdb/test/scr016/TestConstruct01.java
+++ /dev/null
@@ -1,477 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestConstruct01.java,v 1.10 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Do some regression tests for constructors.
- * Run normally (without arguments) it is a simple regression test.
- * Run with a numeric argument, it repeats the regression a number
- * of times, to try to determine if there are memory leaks.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.IOException;
-import java.io.FileNotFoundException;
-
-public class TestConstruct01
-{
- public static final String CONSTRUCT01_DBNAME = "construct01.db";
- public static final String CONSTRUCT01_DBDIR = "/tmp";
- public static final String CONSTRUCT01_DBFULLPATH =
- CONSTRUCT01_DBDIR + "/" + CONSTRUCT01_DBNAME;
-
- private int itemcount; // count the number of items in the database
- public static boolean verbose_flag = false;
-
- public static void ERR(String a)
- {
- System.out.println("FAIL: " + a);
- System.err.println("FAIL: " + a);
- sysexit(1);
- }
-
- public static void DEBUGOUT(String s)
- {
- System.out.println(s);
- }
-
- public static void VERBOSEOUT(String s)
- {
- if (verbose_flag)
- System.out.println(s);
- }
-
- public static void sysexit(int code)
- {
- System.exit(code);
- }
-
- private static void check_file_removed(String name, boolean fatal,
- boolean force_remove_first)
- {
- File f = new File(name);
- if (force_remove_first) {
- f.delete();
- }
- if (f.exists()) {
- if (fatal)
- System.out.print("FAIL: ");
- System.out.print("File \"" + name + "\" still exists after run\n");
- if (fatal)
- sysexit(1);
- }
- }
-
-
- // Check that key/data pairs for 0 .. count-1 are already present,
- // and write a key/data for count. The key and data are
- // both "0123...N" where N == count-1.
- //
- // For some reason on Windows, we need to open using the full pathname
- // of the file when there is no environment, thus the 'has_env'
- // variable.
- //
- void rundb(Db db, int count, boolean has_env, TestOptions options)
- throws DbException, FileNotFoundException
- {
- String name;
-
- if (has_env)
- name = CONSTRUCT01_DBNAME;
- else
- name = CONSTRUCT01_DBFULLPATH;
-
- db.set_error_stream(System.err);
-
- // We don't really care about the pagesize, but we do want
- // to make sure adjusting Db-specific variables works before
- // opening the db.
- //
- db.set_pagesize(1024);
- db.open(null, name, null, Db.DB_BTREE,
- (count != 0) ? 0 : Db.DB_CREATE, 0664);
-
-
- // The bit map of keys we've seen
- long bitmap = 0;
-
- // The bit map of keys we expect to see
- long expected = (1L << (count+1)) - 1;
-
- byte outbuf[] = new byte[count+1];
- int i;
- for (i=0; i<count; i++) {
- outbuf[i] = (byte)('0' + i);
- }
- outbuf[i++] = (byte)'x';
-
- /*
- System.out.println("byte: " + ('0' + 0) + ", after: " +
- (int)'0' + "=" + (int)('0' + 0) +
- "," + (byte)outbuf[0]);
- */
-
- Dbt key = new Dbt(outbuf, 0, i);
- Dbt data = new Dbt(outbuf, 0, i);
-
- //DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf, 0, i));
- db.put(null, key, data, Db.DB_NOOVERWRITE);
-
- // Acquire a cursor for the table.
- Dbc dbcp = db.cursor(null, 0);
-
- // Walk through the table, checking
- Dbt readkey = new Dbt();
- Dbt readdata = new Dbt();
- Dbt whoknows = new Dbt();
-
- readkey.set_flags(options.dbt_alloc_flags);
- readdata.set_flags(options.dbt_alloc_flags);
-
- //DEBUGOUT("Dbc.get");
- while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
- String key_string =
- new String(readkey.get_data(), 0, readkey.get_size());
- String data_string =
- new String(readdata.get_data(), 0, readdata.get_size());
- //DEBUGOUT("Got: " + key_string + ": " + data_string);
- int len = key_string.length();
- if (len <= 0 || key_string.charAt(len-1) != 'x') {
- ERR("reread terminator is bad");
- }
- len--;
- long bit = (1L << len);
- if (len > count) {
- ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
- }
- else if (!data_string.equals(key_string)) {
- ERR("key/data don't match");
- }
- else if ((bitmap & bit) != 0) {
- ERR("key already seen");
- }
- else if ((expected & bit) == 0) {
- ERR("key was not expected");
- }
- else {
- bitmap |= bit;
- expected &= ~(bit);
- for (i=0; i<len; i++) {
- if (key_string.charAt(i) != ('0' + i)) {
- System.out.print(" got " + key_string
- + " (" + (int)key_string.charAt(i)
- + "), wanted " + i
- + " (" + (int)('0' + i)
- + ") at position " + i + "\n");
- ERR("key is corrupt");
- }
- }
- }
- }
- if (expected != 0) {
- System.out.print(" expected more keys, bitmap is: " + expected + "\n");
- ERR("missing keys in database");
- }
-
- dbcp.close();
- db.close(0);
- }
-
- void t1(TestOptions options)
- throws DbException, FileNotFoundException
- {
- Db db = new Db(null, 0);
- rundb(db, itemcount++, false, options);
- }
-
- void t2(TestOptions options)
- throws DbException, FileNotFoundException
- {
- Db db = new Db(null, 0);
- rundb(db, itemcount++, false, options);
- // rundb(db, itemcount++, false, options);
- // rundb(db, itemcount++, false, options);
- }
-
- void t3(TestOptions options)
- throws DbException, FileNotFoundException
- {
- Db db = new Db(null, 0);
- // rundb(db, itemcount++, false, options);
- db.set_errpfx("test3");
- for (int i=0; i<100; i++)
- db.set_errpfx("str" + i);
- rundb(db, itemcount++, false, options);
- }
-
- void t4(TestOptions options)
- throws DbException, FileNotFoundException
- {
- DbEnv env = new DbEnv(0);
- env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
- Db db = new Db(env, 0);
- /**/
- //rundb(db, itemcount++, true, options);
- db.set_errpfx("test4");
- rundb(db, itemcount++, true, options);
- /**/
- env.close(0);
- }
-
- void t5(TestOptions options)
- throws DbException, FileNotFoundException
- {
- DbEnv env = new DbEnv(0);
- env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
- Db db = new Db(env, 0);
- // rundb(db, itemcount++, true, options);
- db.set_errpfx("test5");
- rundb(db, itemcount++, true, options);
- /*
- env.close(0);
-
- // reopen the environment, don't recreate
- env.open(CONSTRUCT01_DBDIR, Db.DB_INIT_MPOOL, 0);
- // Note we cannot reuse the old Db!
- */
- Db anotherdb = new Db(env, 0);
-
- // rundb(anotherdb, itemcount++, true, options);
- anotherdb.set_errpfx("test5");
- rundb(anotherdb, itemcount++, true, options);
- env.close(0);
- }
-
- void t6(TestOptions options)
- throws DbException, FileNotFoundException
- {
- Db db = new Db(null, 0);
- DbEnv dbenv = new DbEnv(0);
- db.close(0);
- dbenv.close(0);
-
- System.gc();
- System.runFinalization();
- }
-
- // By design, t7 leaves a db and dbenv open; it should be detected.
- void t7(TestOptions options)
- throws DbException, FileNotFoundException
- {
- Db db = new Db(null, 0);
- DbEnv dbenv = new DbEnv(0);
-
- System.gc();
- System.runFinalization();
- }
-
- // remove any existing environment or database
- void removeall(boolean use_db)
- {
- {
- if (use_db) {
- try {
- /**/
- //memory leak for this:
- Db tmpdb = new Db(null, 0);
- tmpdb.remove(CONSTRUCT01_DBFULLPATH, null, 0);
- /**/
- DbEnv tmpenv = new DbEnv(0);
- tmpenv.remove(CONSTRUCT01_DBDIR, Db.DB_FORCE);
- }
- catch (DbException dbe) {
- System.err.println("error during remove: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- //expected error:
- // System.err.println("error during remove: " + fnfe);
- }
- }
- }
- check_file_removed(CONSTRUCT01_DBFULLPATH, true, !use_db);
- for (int i=0; i<8; i++) {
- String fname = "__db.00" + i;
- check_file_removed(fname, true, !use_db);
- }
- }
-
- boolean doall(TestOptions options)
- {
- itemcount = 0;
- try {
- removeall((options.testmask & 1) != 0);
- for (int item=1; item<32; item++) {
- if ((options.testmask & (1 << item)) != 0) {
- VERBOSEOUT(" Running test " + item + ":");
- switch (item) {
- case 1:
- t1(options);
- break;
- case 2:
- t2(options);
- break;
- case 3:
- t3(options);
- break;
- case 4:
- t4(options);
- break;
- case 5:
- t5(options);
- break;
- case 6:
- t6(options);
- break;
- case 7:
- t7(options);
- break;
- default:
- ERR("unknown test case: " + item);
- break;
- }
- VERBOSEOUT(" finished.\n");
- }
- }
- removeall((options.testmask & 1) != 0);
- options.successcounter++;
- return true;
- }
- catch (DbException dbe) {
- ERR("EXCEPTION RECEIVED: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- ERR("EXCEPTION RECEIVED: " + fnfe);
- }
- return false;
- }
-
- public static void main(String args[])
- {
- int iterations = 200;
- int mask = 0x7f;
-
- // Make sure the database file is removed before we start.
- check_file_removed(CONSTRUCT01_DBFULLPATH, true, true);
-
- for (int argcnt=0; argcnt<args.length; argcnt++) {
- String arg = args[argcnt];
- if (arg.charAt(0) == '-') {
- // keep the low bit on, which means remove the db between tests.
- mask = 1;
- for (int pos=1; pos<arg.length(); pos++) {
- char ch = arg.charAt(pos);
- if (ch >= '0' && ch <= '9') {
- mask |= (1 << (ch - '0'));
- }
- else if (ch == 'v') {
- verbose_flag = true;
- }
- else {
- ERR("Usage: construct01 [-testdigits] count");
- }
- }
- VERBOSEOUT("mask = " + mask);
-
- }
- else {
- try {
- iterations = Integer.parseInt(arg);
- if (iterations < 0) {
- ERR("Usage: construct01 [-testdigits] count");
- }
- }
- catch (NumberFormatException nfe) {
- ERR("EXCEPTION RECEIVED: " + nfe);
- }
- }
- }
-
- // Run GC before and after the test to give
- // a baseline for any Java memory used.
- //
- System.gc();
- System.runFinalization();
- VERBOSEOUT("gc complete");
- long starttotal = Runtime.getRuntime().totalMemory();
- long startfree = Runtime.getRuntime().freeMemory();
-
- TestConstruct01 con = new TestConstruct01();
- int[] dbt_flags = { 0, Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC };
- String[] dbt_flags_name = { "default", "malloc", "realloc" };
-
- TestOptions options = new TestOptions();
- options.testmask = mask;
-
- for (int flagiter = 0; flagiter < dbt_flags.length; flagiter++) {
- options.dbt_alloc_flags = dbt_flags[flagiter];
-
- VERBOSEOUT("Running with DBT alloc flags: " +
- dbt_flags_name[flagiter]);
- for (int i=0; i<iterations; i++) {
- if (iterations != 0) {
- VERBOSEOUT("(" + i + "/" + iterations + ") ");
- }
- VERBOSEOUT("construct01 running:");
- if (!con.doall(options)) {
- ERR("SOME TEST FAILED");
- }
- else {
- VERBOSEOUT("\nTESTS SUCCESSFUL");
- }
-
- // We continually run GC during the test to keep
- // the Java memory usage low. That way we can
- // monitor the total memory usage externally
- // (e.g. via ps) and verify that we aren't leaking
- // memory in the JNI or DB layer.
- //
- System.gc();
- System.runFinalization();
- VERBOSEOUT("gc complete, bytes free == " + Runtime.getRuntime().freeMemory());
- }
- }
-
- if (options.successcounter == 600) {
- System.out.println("ALL TESTS SUCCESSFUL");
- }
- else {
- System.out.println("***FAIL: " + (600 - options.successcounter) +
- " tests did not complete");
- }
- long endtotal = Runtime.getRuntime().totalMemory();
- long endfree = Runtime.getRuntime().freeMemory();
-
- System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
- System.out.println("delta for free mem: " + magnitude(endfree - startfree));
-
- return;
- }
-
- static String magnitude(long value)
- {
- final long max = 10000000;
- for (long scale = 10; scale <= max; scale *= 10) {
- if (value < scale && value > -scale)
- return "<" + scale;
- }
- return ">" + max;
- }
-
-}
-
-class TestOptions
-{
- int testmask = 0; // which tests to run
- int dbt_alloc_flags = 0; // DB_DBT_* flags to use
- int successcounter =0;
-}
-
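rundb() verifies the database contents with a long used as a bit set: one bit per expected key length, set as each key is read back and cleared from the expected mask. The same check in isolation, as a self-contained sketch with no BDB calls (the input lengths are made up):

public class BitmapCheckSketch
{
    public static void main(String[] args)
    {
        int[] lengths = { 0, 2, 1 };                  // key lengths read back
        long bitmap = 0;                              // keys seen so far
        long expected = (1L << lengths.length) - 1;   // keys we must see
        for (int len : lengths) {
            long bit = 1L << len;
            if ((bitmap & bit) != 0)
                throw new IllegalStateException("key " + len + " seen twice");
            if ((expected & bit) == 0)
                throw new IllegalStateException("key " + len + " unexpected");
            bitmap |= bit;
            expected &= ~bit;
        }
        if (expected != 0)
            throw new IllegalStateException("keys missing: " + expected);
        System.out.println("each key seen exactly once");
    }
}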
diff --git a/storage/bdb/test/scr016/TestConstruct01.testerr b/storage/bdb/test/scr016/TestConstruct01.testerr
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/storage/bdb/test/scr016/TestConstruct01.testerr
+++ /dev/null
diff --git a/storage/bdb/test/scr016/TestConstruct01.testout b/storage/bdb/test/scr016/TestConstruct01.testout
deleted file mode 100644
index 2de13da0036..00000000000
--- a/storage/bdb/test/scr016/TestConstruct01.testout
+++ /dev/null
@@ -1,3 +0,0 @@
-ALL TESTS SUCCESSFUL
-delta for total mem: <10
-delta for free mem: <1000
diff --git a/storage/bdb/test/scr016/TestConstruct02.java b/storage/bdb/test/scr016/TestConstruct02.java
deleted file mode 100644
index 2e55cfc6f32..00000000000
--- a/storage/bdb/test/scr016/TestConstruct02.java
+++ /dev/null
@@ -1,326 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestConstruct02.java,v 1.8 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Do some regression tests for constructors.
- * Run normally (without arguments) it is a simple regression test.
- * Run with a numeric argument, it repeats the regression a number
- * of times, to try to determine if there are memory leaks.
- */
-
-package com.sleepycat.test;
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.IOException;
-import java.io.FileNotFoundException;
-
-public class TestConstruct02
-{
- public static final String CONSTRUCT02_DBNAME = "construct02.db";
- public static final String CONSTRUCT02_DBDIR = "./";
- public static final String CONSTRUCT02_DBFULLPATH =
- CONSTRUCT02_DBDIR + "/" + CONSTRUCT02_DBNAME;
-
- private int itemcount; // count the number of items in the database
- public static boolean verbose_flag = false;
-
- private DbEnv dbenv = new DbEnv(0);
-
- public TestConstruct02()
- throws DbException, FileNotFoundException
- {
- dbenv.open(CONSTRUCT02_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0666);
- }
-
- public void close()
- {
- try {
- dbenv.close(0);
- removeall(true, true);
- }
- catch (DbException dbe) {
- ERR("DbException: " + dbe);
- }
- }
-
- public static void ERR(String a)
- {
- System.out.println("FAIL: " + a);
- sysexit(1);
- }
-
- public static void DEBUGOUT(String s)
- {
- System.out.println(s);
- }
-
- public static void VERBOSEOUT(String s)
- {
- if (verbose_flag)
- System.out.println(s);
- }
-
- public static void sysexit(int code)
- {
- System.exit(code);
- }
-
- private static void check_file_removed(String name, boolean fatal,
- boolean force_remove_first)
- {
- File f = new File(name);
- if (force_remove_first) {
- f.delete();
- }
- if (f.exists()) {
- if (fatal)
- System.out.print("FAIL: ");
- System.out.print("File \"" + name + "\" still exists after run\n");
- if (fatal)
- sysexit(1);
- }
- }
-
-
- // Check that key/data pairs for 0 .. count-1 are already present,
- // and write a key/data for count. The key and data are
- // both "0123...N" where N == count-1.
- //
- void rundb(Db db, int count)
- throws DbException, FileNotFoundException
- {
- if (count >= 64)
- throw new IllegalArgumentException("rundb count arg >= 64");
-
- // The bit map of keys we've seen
- long bitmap = 0;
-
- // The bit map of keys we expect to see
- long expected = (1L << (count+1)) - 1;
-
- byte outbuf[] = new byte[count+1];
- int i;
- for (i=0; i<count; i++) {
- outbuf[i] = (byte)('0' + i);
- }
- outbuf[i++] = (byte)'x';
-
- Dbt key = new Dbt(outbuf, 0, i);
- Dbt data = new Dbt(outbuf, 0, i);
-
- db.put(null, key, data, Db.DB_NOOVERWRITE);
-
- // Acquire a cursor for the table.
- Dbc dbcp = db.cursor(null, 0);
-
- // Walk through the table, checking
- Dbt readkey = new Dbt();
- Dbt readdata = new Dbt();
- Dbt whoknows = new Dbt();
-
- readkey.set_flags(Db.DB_DBT_MALLOC);
- readdata.set_flags(Db.DB_DBT_MALLOC);
-
- while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
- byte[] key_bytes = readkey.get_data();
- byte[] data_bytes = readdata.get_data();
-
- int len = key_bytes.length;
- if (len != data_bytes.length) {
- ERR("key and data are different");
- }
- for (i=0; i<len-1; i++) {
- byte want = (byte)('0' + i);
- if (key_bytes[i] != want || data_bytes[i] != want) {
- System.out.println(" got " + new String(key_bytes) +
- "/" + new String(data_bytes));
- ERR("key or data is corrupt");
- }
- }
- if (len <= 0 ||
- key_bytes[len-1] != (byte)'x' ||
- data_bytes[len-1] != (byte)'x') {
- ERR("reread terminator is bad");
- }
- len--;
- long bit = (1L << len);
- if (len > count) {
- ERR("reread length is bad: expect " + count + " got "+ len);
- }
- else if ((bitmap & bit) != 0) {
- ERR("key already seen");
- }
- else if ((expected & bit) == 0) {
- ERR("key was not expected");
- }
- bitmap |= bit;
- expected &= ~(bit);
- }
- if (expected != 0) {
- System.out.print(" expected more keys, bitmap is: " +
- expected + "\n");
- ERR("missing keys in database");
- }
- dbcp.close();
- }
-
- void t1()
- throws DbException, FileNotFoundException
- {
- Db db = new Db(dbenv, 0);
- db.set_error_stream(System.err);
- db.set_pagesize(1024);
- db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
- Db.DB_CREATE, 0664);
-
- rundb(db, itemcount++);
- rundb(db, itemcount++);
- rundb(db, itemcount++);
- rundb(db, itemcount++);
- rundb(db, itemcount++);
- rundb(db, itemcount++);
- db.close(0);
-
- // Reopening a closed Db is not allowed, so we create a new one.
- db = new Db(dbenv, 0);
- db.set_error_stream(System.err);
- db.set_pagesize(1024);
- db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
- Db.DB_CREATE, 0664);
- rundb(db, itemcount++);
- rundb(db, itemcount++);
- rundb(db, itemcount++);
- rundb(db, itemcount++);
- db.close(0);
- }
-
- // remove any existing environment or database
- void removeall(boolean use_db, boolean remove_env)
- {
- {
- try {
- if (remove_env) {
- DbEnv tmpenv = new DbEnv(0);
- tmpenv.remove(CONSTRUCT02_DBDIR, Db.DB_FORCE);
- }
- else if (use_db) {
- /**/
- //memory leak for this:
- Db tmpdb = new Db(null, 0);
- tmpdb.remove(CONSTRUCT02_DBFULLPATH, null, 0);
- /**/
- }
- }
- catch (DbException dbe) {
- System.err.println("error during remove: " + dbe);
- }
- catch (FileNotFoundException dbe) {
- System.err.println("error during remove: " + dbe);
- }
- }
- check_file_removed(CONSTRUCT02_DBFULLPATH, true, !use_db);
- if (remove_env) {
- for (int i=0; i<8; i++) {
- String fname = "__db.00" + i;
- check_file_removed(fname, true, !use_db);
- }
- }
- }
-
- boolean doall()
- {
- itemcount = 0;
- try {
- VERBOSEOUT(" Running test 1:\n");
- t1();
- VERBOSEOUT(" finished.\n");
- removeall(true, false);
- return true;
- }
- catch (DbException dbe) {
- ERR("EXCEPTION RECEIVED: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- ERR("EXCEPTION RECEIVED: " + fnfe);
- }
- return false;
- }
-
- public static void main(String args[])
- {
- int iterations = 200;
-
- for (int argcnt=0; argcnt<args.length; argcnt++) {
- String arg = args[argcnt];
- try {
- iterations = Integer.parseInt(arg);
- if (iterations < 0) {
- ERR("Usage: construct02 [-testdigits] count");
- }
- }
- catch (NumberFormatException nfe) {
- ERR("EXCEPTION RECEIVED: " + nfe);
- }
- }
-
- System.gc();
- System.runFinalization();
- VERBOSEOUT("gc complete");
- long starttotal = Runtime.getRuntime().totalMemory();
- long startfree = Runtime.getRuntime().freeMemory();
- TestConstruct02 con = null;
-
- try {
- con = new TestConstruct02();
- }
- catch (DbException dbe) {
- System.err.println("Exception: " + dbe);
- System.exit(1);
- }
- catch (java.io.FileNotFoundException fnfe) {
- System.err.println("Exception: " + fnfe);
- System.exit(1);
- }
-
- for (int i=0; i<iterations; i++) {
- if (iterations != 0) {
- VERBOSEOUT("(" + i + "/" + iterations + ") ");
- }
- VERBOSEOUT("construct02 running:\n");
- if (!con.doall()) {
- ERR("SOME TEST FAILED");
- }
- System.gc();
- System.runFinalization();
- VERBOSEOUT("gc complete");
-
- }
- con.close();
-
- System.out.print("ALL TESTS SUCCESSFUL\n");
-
- long endtotal = Runtime.getRuntime().totalMemory();
- long endfree = Runtime.getRuntime().freeMemory();
-
- System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
- System.out.println("delta for free mem: " + magnitude(endfree - startfree));
-
- return;
- }
-
- static String magnitude(long value)
- {
- final long max = 10000000;
- for (long scale = 10; scale <= max; scale *= 10) {
- if (value < scale && value > -scale)
- return "<" + scale;
- }
- return ">" + max;
- }
-}
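Both constructor tests report memory deltas through magnitude(), which buckets a value by order of magnitude so the .testout files stay stable across JVMs and runs. The function in isolation, with a small demonstration main added for illustration:

public class MagnitudeSketch
{
    static String magnitude(long value)
    {
        final long max = 10000000;
        for (long scale = 10; scale <= max; scale *= 10)
            if (value < scale && value > -scale)
                return "<" + scale;     // first bucket that contains value
        return ">" + max;
    }

    public static void main(String[] args)
    {
        System.out.println(magnitude(7));         // prints <10
        System.out.println(magnitude(-42));       // prints <100
        System.out.println(magnitude(99999999));  // prints >10000000
    }
}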
diff --git a/storage/bdb/test/scr016/TestConstruct02.testout b/storage/bdb/test/scr016/TestConstruct02.testout
deleted file mode 100644
index 5d2041cd197..00000000000
--- a/storage/bdb/test/scr016/TestConstruct02.testout
+++ /dev/null
@@ -1,3 +0,0 @@
-ALL TESTS SUCCESSFUL
-delta for total mem: <10
-delta for free mem: <10000
diff --git a/storage/bdb/test/scr016/TestDbtFlags.java b/storage/bdb/test/scr016/TestDbtFlags.java
deleted file mode 100644
index 0e2110ad137..00000000000
--- a/storage/bdb/test/scr016/TestDbtFlags.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestDbtFlags.java,v 1.8 2004/01/28 03:36:34 bostic Exp $
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.InputStreamReader;
-import java.io.IOException;
-import java.io.PrintStream;
-
-public class TestDbtFlags
-{
- private static final String FileName = "access.db";
- private int flag_value;
- private int buf_size;
- private int cur_input_line = 0;
-
- /* Test input: Puck's epilogue from A Midsummer Night's Dream. */
- static final String[] input_lines = {
- "If we shadows have offended",
- "Think but this, and all is mended",
- "That you have but slumber'd here",
- "While these visions did appear",
- "And this weak and idle theme",
- "No more yielding but a dream",
- "Gentles, do not reprehend",
- "if you pardon, we will mend",
- "And, as I am an honest Puck, if we have unearned luck",
- "Now to 'scape the serpent's tongue, we will make amends ere long;",
- "Else the Puck a liar call; so, good night unto you all.",
- "Give me your hands, if we be friends, and Robin shall restore amends."
- };
-
- public TestDbtFlags(int flag_value, int buf_size)
- {
- this.flag_value = flag_value;
- this.buf_size = buf_size;
- }
-
- public static void runWithFlags(int flag_value, int size)
- {
- String msg = "=-=-=-= Test with DBT flags " + flag_value +
- " bufsize " + size;
- System.out.println(msg);
- System.err.println(msg);
-
- try
- {
- TestDbtFlags app = new TestDbtFlags(flag_value, size);
- app.run();
- }
- catch (DbException dbe)
- {
- System.err.println("TestDbtFlags: " + dbe.toString());
- System.exit(1);
- }
- catch (FileNotFoundException fnfe)
- {
- System.err.println("TestDbtFlags: " + fnfe.toString());
- System.exit(1);
- }
- }
-
- public static void main(String argv[])
- {
- runWithFlags(Db.DB_DBT_MALLOC, -1);
- runWithFlags(Db.DB_DBT_REALLOC, -1);
- runWithFlags(Db.DB_DBT_USERMEM, 20);
- runWithFlags(Db.DB_DBT_USERMEM, 50);
- runWithFlags(Db.DB_DBT_USERMEM, 200);
- runWithFlags(0, -1);
-
- System.exit(0);
- }
-
- String get_input_line()
- {
- if (cur_input_line >= input_lines.length)
- return null;
- return input_lines[cur_input_line++];
- }
-
- public void run()
- throws DbException, FileNotFoundException
- {
- // Remove the previous database.
- new File(FileName).delete();
-
- // Create the database object.
- // There is no environment for this simple example.
- Db table = new Db(null, 0);
- table.setErrorStream(System.err);
- table.setErrorPrefix("TestDbtFlags");
- table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
-
- //
- // Insert records into the database, where the key is the user
- // input and the data is the user input in reverse order.
- //
- for (;;) {
- //System.err.println("input line " + cur_input_line);
- String line = get_input_line();
- if (line == null)
- break;
-
- String reversed = (new StringBuffer(line)).reverse().toString();
-
- // See definition of StringDbt below
- //
- StringDbt key = new StringDbt(line, flag_value);
- StringDbt data = new StringDbt(reversed, flag_value);
-
- try
- {
- int err;
- if ((err = table.put(null,
- key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
- System.out.println("Key " + line + " already exists.");
- }
- key.check_flags();
- data.check_flags();
- }
- catch (DbException dbe)
- {
- System.out.println(dbe.toString());
- }
- }
-
- // Acquire an iterator for the table.
- Dbc iterator;
- iterator = table.cursor(null, 0);
-
- // Walk through the table, printing the key/data pairs.
- // See class StringDbt defined below.
- //
- StringDbt key = new StringDbt(flag_value, buf_size);
- StringDbt data = new StringDbt(flag_value, buf_size);
-
- int iteration_count = 0;
- int dbreturn = 0;
-
- while (dbreturn == 0) {
- //System.err.println("iteration " + iteration_count);
- try {
- if ((dbreturn = iterator.get(key, data, Db.DB_NEXT)) == 0) {
- System.out.println(key.get_string() + " : " + data.get_string());
- }
- }
- catch (DbMemoryException dme) {
- /* In a real application, we'd normally increase
- * the size of the buffer. Since we've created
- * this error condition for testing, we'll just report it.
- * We still need to skip over this record, and we don't
- * want to mess with our original Dbt's, since we want
- * to see more errors. So create some temporary
- * mallocing Dbts to get this record.
- */
- System.err.println("exception, iteration " + iteration_count +
- ": " + dme);
- System.err.println(" key size: " + key.getSize() +
- " ulen: " + key.getUserBufferLength());
- System.err.println(" data size: " + data.getSize() +
- " ulen: " + data.getUserBufferLength());
-
- dme.getDbt().setSize(buf_size);
- StringDbt tempkey = new StringDbt(Db.DB_DBT_MALLOC, -1);
- StringDbt tempdata = new StringDbt(Db.DB_DBT_MALLOC, -1);
- if ((dbreturn = iterator.get(tempkey, tempdata, Db.DB_NEXT)) != 0) {
- System.err.println("cannot get expected next record");
- return;
- }
- System.out.println(tempkey.get_string() + " : " +
- tempdata.get_string());
- }
- iteration_count++;
- }
- key.check_flags();
- data.check_flags();
-
- iterator.close();
- table.close(0);
- }
-
- // Here's an example of how you can extend a Dbt in a straightforward
- // way to allow easy storage/retrieval of strings, or whatever
- // kind of data you wish. We've declared it as a static inner
- // class, but it need not be.
- //
- static /*inner*/
- class StringDbt extends Dbt
- {
- int saved_flags;
-
- StringDbt(int flags, int buf_size)
- {
- this.saved_flags = flags;
- setFlags(saved_flags);
- if (buf_size != -1) {
- setData(new byte[buf_size]);
- setUserBufferLength(buf_size);
- }
- }
-
- StringDbt(String value, int flags)
- {
- this.saved_flags = flags;
- setFlags(saved_flags);
- set_string(value);
- }
-
- void set_string(String value)
- {
- setData(value.getBytes());
- setSize(value.length());
- check_flags();
- }
-
- String get_string()
- {
- check_flags();
- return new String(getData(), 0, getSize());
- }
-
- void check_flags()
- {
- int actual_flags = getFlags();
- if (actual_flags != saved_flags) {
- System.err.println("flags botch: expected " + saved_flags +
- ", got " + actual_flags);
- }
- }
- }
-}
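TestDbtFlags deliberately provokes DbMemoryException by handing DB_DBT_USERMEM buffers that are too small, then notes that a real application would grow the buffer instead. A hedged sketch of that recovery, assuming (as the error output below confirms) that the failed Dbt's size reports the length BDB needed:

import com.sleepycat.db.*;

public class GrowBufferSketch
{
    // Keep retrying the cursor read, enlarging whichever user buffer
    // was too small, until the record fits. Sketch only.
    static int getNext(Dbc cursor, Dbt key, Dbt data) throws DbException
    {
        for (;;) {
            try {
                return cursor.get(key, data, Db.DB_NEXT);
            }
            catch (DbMemoryException dme) {
                Dbt small = dme.getDbt();       // the undersized Dbt
                int needed = small.getSize();   // length BDB required
                small.setData(new byte[needed]);
                small.setUserBufferLength(needed);
            }
        }
    }
}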
diff --git a/storage/bdb/test/scr016/TestDbtFlags.testerr b/storage/bdb/test/scr016/TestDbtFlags.testerr
deleted file mode 100644
index 7666868ebd4..00000000000
--- a/storage/bdb/test/scr016/TestDbtFlags.testerr
+++ /dev/null
@@ -1,54 +0,0 @@
-=-=-=-= Test with DBT flags 4 bufsize -1
-=-=-=-= Test with DBT flags 16 bufsize -1
-=-=-=-= Test with DBT flags 32 bufsize 20
-exception, iteration 0: Dbt not large enough for available data
- key size: 28 ulen: 20
- data size: 28 ulen: 20
-exception, iteration 1: Dbt not large enough for available data
- key size: 53 ulen: 20
- data size: 53 ulen: 20
-exception, iteration 2: Dbt not large enough for available data
- key size: 55 ulen: 20
- data size: 55 ulen: 20
-exception, iteration 3: Dbt not large enough for available data
- key size: 25 ulen: 20
- data size: 25 ulen: 20
-exception, iteration 4: Dbt not large enough for available data
- key size: 69 ulen: 20
- data size: 69 ulen: 20
-exception, iteration 5: Dbt not large enough for available data
- key size: 27 ulen: 20
- data size: 27 ulen: 20
-exception, iteration 6: Dbt not large enough for available data
- key size: 28 ulen: 20
- data size: 28 ulen: 20
-exception, iteration 7: Dbt not large enough for available data
- key size: 65 ulen: 20
- data size: 65 ulen: 20
-exception, iteration 8: Dbt not large enough for available data
- key size: 32 ulen: 20
- data size: 32 ulen: 20
-exception, iteration 9: Dbt not large enough for available data
- key size: 33 ulen: 20
- data size: 33 ulen: 20
-exception, iteration 10: Dbt not large enough for available data
- key size: 30 ulen: 20
- data size: 30 ulen: 20
-exception, iteration 11: Dbt not large enough for available data
- key size: 27 ulen: 20
- data size: 27 ulen: 20
-=-=-=-= Test with DBT flags 32 bufsize 50
-exception, iteration 1: Dbt not large enough for available data
- key size: 53 ulen: 50
- data size: 53 ulen: 50
-exception, iteration 2: Dbt not large enough for available data
- key size: 55 ulen: 50
- data size: 55 ulen: 50
-exception, iteration 4: Dbt not large enough for available data
- key size: 69 ulen: 50
- data size: 69 ulen: 50
-exception, iteration 7: Dbt not large enough for available data
- key size: 65 ulen: 50
- data size: 65 ulen: 50
-=-=-=-= Test with DBT flags 32 bufsize 200
-=-=-=-= Test with DBT flags 0 bufsize -1
diff --git a/storage/bdb/test/scr016/TestDbtFlags.testout b/storage/bdb/test/scr016/TestDbtFlags.testout
deleted file mode 100644
index b8deb1bcc16..00000000000
--- a/storage/bdb/test/scr016/TestDbtFlags.testout
+++ /dev/null
@@ -1,78 +0,0 @@
-=-=-=-= Test with DBT flags 4 bufsize -1
-And this weak and idle theme : emeht eldi dna kaew siht dnA
-And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
-Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
-Gentles, do not reprehend : dneherper ton od ,seltneG
-Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
-If we shadows have offended : dedneffo evah swodahs ew fI
-No more yielding but a dream : maerd a tub gnidleiy erom oN
-Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
-That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
-Think but this, and all is mended : dednem si lla dna ,siht tub knihT
-While these visions did appear : raeppa did snoisiv eseht elihW
-if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
-=-=-=-= Test with DBT flags 16 bufsize -1
-And this weak and idle theme : emeht eldi dna kaew siht dnA
-And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
-Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
-Gentles, do not reprehend : dneherper ton od ,seltneG
-Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
-If we shadows have offended : dedneffo evah swodahs ew fI
-No more yielding but a dream : maerd a tub gnidleiy erom oN
-Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
-That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
-Think but this, and all is mended : dednem si lla dna ,siht tub knihT
-While these visions did appear : raeppa did snoisiv eseht elihW
-if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
-=-=-=-= Test with DBT flags 32 bufsize 20
-And this weak and idle theme : emeht eldi dna kaew siht dnA
-And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
-Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
-Gentles, do not reprehend : dneherper ton od ,seltneG
-Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
-If we shadows have offended : dedneffo evah swodahs ew fI
-No more yielding but a dream : maerd a tub gnidleiy erom oN
-Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
-That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
-Think but this, and all is mended : dednem si lla dna ,siht tub knihT
-While these visions did appear : raeppa did snoisiv eseht elihW
-if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
-=-=-=-= Test with DBT flags 32 bufsize 50
-And this weak and idle theme : emeht eldi dna kaew siht dnA
-And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
-Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
-Gentles, do not reprehend : dneherper ton od ,seltneG
-Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
-If we shadows have offended : dedneffo evah swodahs ew fI
-No more yielding but a dream : maerd a tub gnidleiy erom oN
-Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
-That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
-Think but this, and all is mended : dednem si lla dna ,siht tub knihT
-While these visions did appear : raeppa did snoisiv eseht elihW
-if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
-=-=-=-= Test with DBT flags 32 bufsize 200
-And this weak and idle theme : emeht eldi dna kaew siht dnA
-And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
-Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
-Gentles, do not reprehend : dneherper ton od ,seltneG
-Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
-If we shadows have offended : dedneffo evah swodahs ew fI
-No more yielding but a dream : maerd a tub gnidleiy erom oN
-Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
-That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
-Think but this, and all is mended : dednem si lla dna ,siht tub knihT
-While these visions did appear : raeppa did snoisiv eseht elihW
-if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
-=-=-=-= Test with DBT flags 0 bufsize -1
-And this weak and idle theme : emeht eldi dna kaew siht dnA
-And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
-Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
-Gentles, do not reprehend : dneherper ton od ,seltneG
-Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
-If we shadows have offended : dedneffo evah swodahs ew fI
-No more yielding but a dream : maerd a tub gnidleiy erom oN
-Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
-That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
-Think but this, and all is mended : dednem si lla dna ,siht tub knihT
-While these visions did appear : raeppa did snoisiv eseht elihW
-if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
diff --git a/storage/bdb/test/scr016/TestGetSetMethods.java b/storage/bdb/test/scr016/TestGetSetMethods.java
deleted file mode 100644
index 06a52c5ad0e..00000000000
--- a/storage/bdb/test/scr016/TestGetSetMethods.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestGetSetMethods.java,v 1.7 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Do some regression tests for simple get/set access methods
- * on DbEnv, DbTxn, Db. We don't currently test that they have
- * the desired effect, only that they operate and return correctly.
- */
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestGetSetMethods
-{
- public void testMethods()
- throws DbException, FileNotFoundException
- {
- DbEnv dbenv = new DbEnv(0);
- DbTxn dbtxn;
- byte[][] conflicts = new byte[10][10];
-
- dbenv.setTimeout(0x90000000,
- Db.DB_SET_LOCK_TIMEOUT);
- dbenv.setLogBufferSize(0x1000);
- dbenv.setLogDir(".");
- dbenv.setLogMax(0x10000000);
- dbenv.setLogRegionMax(0x100000);
- dbenv.setLockConflicts(conflicts);
- dbenv.setLockDetect(Db.DB_LOCK_DEFAULT);
- // exists, but is deprecated:
- // dbenv.set_lk_max(0);
- dbenv.setLockMaxLockers(100);
- dbenv.setLockMaxLocks(10);
- dbenv.setLockMaxObjects(1000);
- dbenv.setMemoryPoolMapSize(0x10000);
- dbenv.setTestAndSetSpins(1000);
-
- // Need to open the environment so we
- // can get a transaction.
- //
- dbenv.open(".", Db.DB_CREATE | Db.DB_INIT_TXN |
- Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
- Db.DB_INIT_MPOOL,
- 0644);
-
- dbtxn = dbenv.txnBegin(null, Db.DB_TXN_NOWAIT);
- dbtxn.setTimeout(0xA0000000, Db.DB_SET_TXN_TIMEOUT);
- dbtxn.abort();
-
- dbenv.close(0);
-
- // We create a separate Db handle for each access type.
- // Once we call (for instance) setBtreeMinKey, DB 'knows'
- // this is a Btree Db, and the same handle cannot be used
- // to try Hash or Recno methods.
- //
- Db db_bt = new Db(null, 0);
- db_bt.setBtreeMinKey(100);
- db_bt.setCacheSize(0x100000, 0);
- db_bt.close(0);
-
- Db db_h = new Db(null, 0);
- db_h.setHashFillFactor(0x10);
- db_h.setHashNumElements(100);
- db_h.setByteOrder(0);
- db_h.setPageSize(0x10000);
- db_h.close(0);
-
- Db db_re = new Db(null, 0);
- db_re.setRecordDelimiter('@');
- db_re.setRecordPad(10);
- db_re.setRecordSource("re.in");
- db_re.setRecordLength(1000);
- db_re.close(0);
-
- Db db_q = new Db(null, 0);
- db_q.setQueueExtentSize(200);
- db_q.close(0);
- }
-
- public static void main(String[] args)
- {
- try {
- TestGetSetMethods tester = new TestGetSetMethods();
- tester.testMethods();
- }
- catch (Exception e) {
- System.err.println("TestGetSetMethods: Exception: " + e);
- }
- }
-}
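The comment in testMethods() is worth underlining: a Db handle commits to an access method the moment a type-specific setter is called, so each method needs a fresh handle. In sketch form (illustrative only, reusing the setters shown above):

import com.sleepycat.db.*;

public class OneHandlePerTypeSketch
{
    public static void main(String[] args) throws Exception
    {
        Db btree = new Db(null, 0);
        btree.setBtreeMinKey(100);    // this handle is now Btree-only
        btree.close(0);

        Db hash = new Db(null, 0);    // Hash tuning needs its own handle
        hash.setHashFillFactor(0x10);
        hash.close(0);
    }
}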
diff --git a/storage/bdb/test/scr016/TestKeyRange.java b/storage/bdb/test/scr016/TestKeyRange.java
deleted file mode 100644
index 57a776b014a..00000000000
--- a/storage/bdb/test/scr016/TestKeyRange.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestKeyRange.java,v 1.7 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * NOTE: TestKeyRange is AccessExample changed to test Db.key_range.
- * See comments with ADDED for specific areas of change.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.StringReader;
-import java.io.Reader;
-import java.io.IOException;
-import java.io.PrintStream;
-
-public class TestKeyRange
-{
- private static final String FileName = "access.db";
-
- public TestKeyRange()
- {
- }
-
- private static void usage()
- {
- System.err.println("usage: TestKeyRange\n");
- System.exit(1);
- }
-
- public static void main(String argv[])
- {
- try
- {
- TestKeyRange app = new TestKeyRange();
- app.run();
- }
- catch (DbException dbe)
- {
- System.err.println("TestKeyRange: " + dbe.toString());
- System.exit(1);
- }
- catch (FileNotFoundException fnfe)
- {
- System.err.println("TestKeyRange: " + fnfe.toString());
- System.exit(1);
- }
- System.exit(0);
- }
-
- // Prompts for a line, and keeps prompting until a non-blank
- // line is returned. Returns null on error.
- //
- static public String askForLine(Reader reader,
- PrintStream out, String prompt)
- {
- String result = "";
- while (result != null && result.length() == 0) {
- out.print(prompt);
- out.flush();
- result = getLine(reader);
- }
- return result;
- }
-
- // Not terribly efficient, but does the job.
- // Works for reading a line from stdin or a file.
- // Returns null on EOF. If EOF appears in the middle
- // of a line, returns that line, then null on next call.
- //
- static public String getLine(Reader reader)
- {
- StringBuffer b = new StringBuffer();
- int c;
- try {
- while ((c = reader.read()) != -1 && c != '\n') {
- if (c != '\r')
- b.append((char)c);
- }
- }
- catch (IOException ioe) {
- c = -1;
- }
-
- if (c == -1 && b.length() == 0)
- return null;
- else
- return b.toString();
- }
-
- public void run()
- throws DbException, FileNotFoundException
- {
- // Remove the previous database.
- new File(FileName).delete();
-
- // Create the database object.
- // There is no environment for this simple example.
- Db table = new Db(null, 0);
- table.setErrorStream(System.err);
- table.setErrorPrefix("TestKeyRange");
- table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
-
- //
- // Insert records into the database, where the key is the user
- // input and the data is the user input in reverse order.
- //
- Reader reader = new StringReader("abc\nmiddle\nzend\nmoremiddle\nZED\nMAMAMIA");
-
- int count = 0; /*ADDED*/
- for (;;) {
- String line = askForLine(reader, System.out, "input>");
- if (line == null)
- break;
-
- String reversed = (new StringBuffer(line)).reverse().toString();
-
- // See definition of StringDbt below
- //
- StringDbt key = new StringDbt(line);
- StringDbt data = new StringDbt(reversed);
-
- try
- {
- int err;
- if ((err = table.put(null, key, data, 0)) == Db.DB_KEYEXIST) {
- System.out.println("Key " + line + " already exists.");
- }
- }
- catch (DbException dbe)
- {
- System.out.println(dbe.toString());
- }
- System.out.println("");
-
- /*START ADDED*/
- {
- if (count++ > 0) {
- DbKeyRange range = new DbKeyRange();
- table.keyRange(null, key, range, 0);
- System.out.println("less: " + range.less);
- System.out.println("equal: " + range.equal);
- System.out.println("greater: " + range.greater);
- }
- }
- /*END ADDED*/
-
- }
-
- // Acquire an iterator for the table.
- Dbc iterator;
- iterator = table.cursor(null, 0);
-
- // Walk through the table, printing the key/data pairs.
- // See class StringDbt defined below.
- //
- StringDbt key = new StringDbt();
- StringDbt data = new StringDbt();
- while (iterator.get(key, data, Db.DB_NEXT) == 0)
- {
- System.out.println(key.getString() + " : " + data.getString());
- }
- iterator.close();
- table.close(0);
- }
-
- // Here's an example of how you can extend a Dbt in a straightforward
- // way to allow easy storage/retrieval of strings, or whatever
- // kind of data you wish. We've declared it as a static inner
- // class, but it need not be.
- //
- static /*inner*/
- class StringDbt extends Dbt
- {
- StringDbt()
- {
- setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- StringDbt(String value)
- {
- setString(value);
- setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- void setString(String value)
- {
- setData(value.getBytes());
- setSize(value.length());
- }
-
- String getString()
- {
- return new String(getData(), 0, getSize());
- }
- }
-}
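keyRange() fills a DbKeyRange with three estimated fractions that sum to 1.0. The expected output below makes this concrete: after the fourth insert the btree holds abc, middle, moremiddle, and zend, so querying the just-inserted moremiddle reports less 0.5 (two of four keys sort before it), equal 0.25, and greater 0.25. A minimal helper in the same style (class and method names invented):

import com.sleepycat.db.*;

public class KeyRangeSketch
{
    // Print where 'key' falls in 'table'; the fractions are estimates.
    static void report(Db table, Dbt key) throws DbException
    {
        DbKeyRange range = new DbKeyRange();
        table.keyRange(null, key, range, 0);
        System.out.println("less: " + range.less +
                           " equal: " + range.equal +
                           " greater: " + range.greater);
    }
}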
diff --git a/storage/bdb/test/scr016/TestKeyRange.testout b/storage/bdb/test/scr016/TestKeyRange.testout
deleted file mode 100644
index c265f3289fb..00000000000
--- a/storage/bdb/test/scr016/TestKeyRange.testout
+++ /dev/null
@@ -1,27 +0,0 @@
-input>
-input>
-less: 0.5
-equal: 0.5
-greater: 0.0
-input>
-less: 0.6666666666666666
-equal: 0.3333333333333333
-greater: 0.0
-input>
-less: 0.5
-equal: 0.25
-greater: 0.25
-input>
-less: 0.0
-equal: 0.2
-greater: 0.8
-input>
-less: 0.0
-equal: 0.16666666666666666
-greater: 0.8333333333333334
-input>MAMAMIA : AIMAMAM
-ZED : DEZ
-abc : cba
-middle : elddim
-moremiddle : elddimerom
-zend : dnez
diff --git a/storage/bdb/test/scr016/TestLockVec.java b/storage/bdb/test/scr016/TestLockVec.java
deleted file mode 100644
index b14a20c6848..00000000000
--- a/storage/bdb/test/scr016/TestLockVec.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestLockVec.java,v 1.7 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * test of DbEnv.lock_vec()
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestLockVec
-{
- public static int locker1;
- public static int locker2;
-
- public static void gdb_pause()
- {
- try {
- System.err.println("attach gdb and type return...");
- System.in.read(new byte[10]);
- }
- catch (java.io.IOException ie) {
- }
- }
-
- public static void main(String[] args)
- {
- try {
- DbEnv dbenv1 = new DbEnv(0);
- DbEnv dbenv2 = new DbEnv(0);
- dbenv1.open(".",
- Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
- dbenv2.open(".",
- Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
- locker1 = dbenv1.lockId();
- locker2 = dbenv1.lockId();
- Db db1 = new Db(dbenv1, 0);
- db1.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
- Db db2 = new Db(dbenv2, 0);
- db2.open(null, "my.db", null, Db.DB_BTREE, 0, 0);
-
- // populate our database, just two elements.
- Dbt Akey = new Dbt("A".getBytes());
- Dbt Adata = new Dbt("Adata".getBytes());
- Dbt Bkey = new Dbt("B".getBytes());
- Dbt Bdata = new Dbt("Bdata".getBytes());
-
- // We don't allow Dbts to be reused within the
- // same method call, so we need some duplicates.
- Dbt Akeyagain = new Dbt("A".getBytes());
- Dbt Bkeyagain = new Dbt("B".getBytes());
-
- db1.put(null, Akey, Adata, 0);
- db1.put(null, Bkey, Bdata, 0);
-
- Dbt notInDatabase = new Dbt("C".getBytes());
-
- /* make sure our check mechanisms work */
- int expectedErrs = 0;
-
- lock_check_free(dbenv2, Akey);
- try {
- lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
- }
- catch (DbException dbe1) {
- expectedErrs += 1;
- }
- DbLock tmplock = dbenv1.lockGet(locker1, Db.DB_LOCK_NOWAIT,
- Akey, Db.DB_LOCK_READ);
- lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
- try {
- lock_check_free(dbenv2, Akey);
- }
- catch (DbException dbe2) {
- expectedErrs += 2;
- }
- if (expectedErrs != 1+2) {
- System.err.println("lock check mechanism is broken");
- System.exit(1);
- }
- dbenv1.lockPut(tmplock);
-
- /* Now on with the test, a series of lock_vec requests,
- * with checks between each call.
- */
-
- System.out.println("get a few");
- /* Request: get A(W), B(R), B(R) */
- DbLockRequest[] reqs = new DbLockRequest[3];
-
- reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
- Akey, null);
- reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
- Bkey, null);
- reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
- Bkeyagain, null);
-
- dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
-
- /* Locks held: A(W), B(R), B(R) */
- lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
- lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
-
- System.out.println("put a couple");
- /* Request: put A, B(first) */
- reqs[0].setOp(Db.DB_LOCK_PUT);
- reqs[1].setOp(Db.DB_LOCK_PUT);
-
- dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 2);
-
- /* Locks held: B(R) */
- lock_check_free(dbenv2, Akey);
- lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
-
- System.out.println("put one more, test index offset");
- /* Request: put B(second) */
- reqs[2].setOp(Db.DB_LOCK_PUT);
-
- dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 2, 1);
-
- /* Locks held: <none> */
- lock_check_free(dbenv2, Akey);
- lock_check_free(dbenv2, Bkey);
-
- System.out.println("get a few");
- /* Request: get A(R), A(R), B(R) */
- reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
- Akey, null);
- reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
- Akeyagain, null);
- reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
- Bkey, null);
- dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
-
- /* Locks held: A(R), B(R), B(R) */
- lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
- lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
-
- System.out.println("try putobj");
- /* Request: get B(R), putobj A */
- reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
- Bkey, null);
- reqs[2] = new DbLockRequest(Db.DB_LOCK_PUT_OBJ, 0,
- Akey, null);
- dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 1, 2);
-
- /* Locks held: B(R), B(R) */
- lock_check_free(dbenv2, Akey);
- lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
-
- System.out.println("get one more");
- /* Request: get A(W) */
- reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
- Akey, null);
- dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
-
- /* Locks held: A(W), B(R), B(R) */
- lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
- lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
-
- System.out.println("putall");
- /* Request: putall */
- reqs[0] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0,
- null, null);
- dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
-
- lock_check_free(dbenv2, Akey);
- lock_check_free(dbenv2, Bkey);
- db1.close(0);
- dbenv1.close(0);
- db2.close(0);
- dbenv2.close(0);
- System.out.println("done");
- }
- catch (DbLockNotGrantedException nge) {
- System.err.println("Db Exception: " + nge);
- }
- catch (DbException dbe) {
- System.err.println("Db Exception: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- System.err.println("FileNotFoundException: " + fnfe);
- }
-
- }
-
- /* Verify that the lock is free, throw an exception if not.
- * We do this by trying to grab a write lock (no wait).
- */
- static void lock_check_free(DbEnv dbenv, Dbt dbt)
- throws DbException
- {
- DbLock tmplock = dbenv.lockGet(locker2, Db.DB_LOCK_NOWAIT,
- dbt, Db.DB_LOCK_WRITE);
- dbenv.lockPut(tmplock);
- }
-
- /* Verify that the lock is held with the mode, throw an exception if not.
- * If we have a write lock, we should not be able to get the lock
- * for reading. If we have a read lock, we should be able to get
- * it for reading, but not writing.
- */
- static void lock_check_held(DbEnv dbenv, Dbt dbt, int mode)
- throws DbException
- {
- DbLock never = null;
-
- try {
- if (mode == Db.DB_LOCK_WRITE) {
- never = dbenv.lockGet(locker2, Db.DB_LOCK_NOWAIT,
- dbt, Db.DB_LOCK_READ);
- }
- else if (mode == Db.DB_LOCK_READ) {
- DbLock rlock = dbenv.lockGet(locker2, Db.DB_LOCK_NOWAIT,
- dbt, Db.DB_LOCK_READ);
- dbenv.lockPut(rlock);
- never = dbenv.lockGet(locker2, Db.DB_LOCK_NOWAIT,
- dbt, Db.DB_LOCK_WRITE);
- }
- else {
- throw new DbException("lock_check_held bad mode");
- }
- }
- catch (DbLockNotGrantedException nge) {
- /* We expect this on our last lock_get call */
- }
-
- /* make sure we failed */
- if (never != null) {
- try {
- dbenv.lockPut(never);
- }
- catch (DbException dbe2) {
- System.err.println("Got some real troubles now");
- System.exit(1);
- }
- throw new DbException("lock_check_held: lock was not held");
- }
- }
-
-}
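
The deleted test drives DbEnv.lockVector, which submits an array of
DbLockRequest operations in one call; the index/count pair selects a slice
of the array, so requests can be reused across calls. A minimal sketch
under the same legacy API (the locker and lock object are illustrative):

    package com.sleepycat.test;

    import com.sleepycat.db.*;
    import java.io.FileNotFoundException;

    public class LockVecSketch
    {
        public static void main(String[] args)
        {
            try {
                DbEnv env = new DbEnv(0);
                env.open(".", Db.DB_CREATE | Db.DB_INIT_LOCK |
                         Db.DB_INIT_MPOOL, 0);
                int locker = env.lockId();
                Dbt obj = new Dbt("object".getBytes());

                // Batch a write-lock GET and a PUT_ALL release together.
                DbLockRequest[] reqs = new DbLockRequest[2];
                reqs[0] = new DbLockRequest(Db.DB_LOCK_GET,
                                            Db.DB_LOCK_WRITE, obj, null);
                reqs[1] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0, null, null);

                // Process reqs[0] only, then reqs[1] only, to show the
                // offset/count slicing the test checks.
                env.lockVector(locker, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
                env.lockVector(locker, Db.DB_LOCK_NOWAIT, reqs, 1, 1);
                env.close(0);
            }
            catch (DbException dbe) {
                System.err.println("Db Exception: " + dbe);
            }
            catch (FileNotFoundException fnfe) {
                System.err.println("FileNotFoundException: " + fnfe);
            }
        }
    }
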
diff --git a/storage/bdb/test/scr016/TestLockVec.testout b/storage/bdb/test/scr016/TestLockVec.testout
deleted file mode 100644
index 1cf16c6ac4e..00000000000
--- a/storage/bdb/test/scr016/TestLockVec.testout
+++ /dev/null
@@ -1,8 +0,0 @@
-get a few
-put a couple
-put one more, test index offset
-get a few
-try putobj
-get one more
-putall
-done
diff --git a/storage/bdb/test/scr016/TestLogc.java b/storage/bdb/test/scr016/TestLogc.java
deleted file mode 100644
index 8f406831829..00000000000
--- a/storage/bdb/test/scr016/TestLogc.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestLogc.java,v 1.9 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * A basic regression test for the Logc class.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestLogc
-{
- public static void main(String[] args)
- {
- try {
- DbEnv env = new DbEnv(0);
- env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
-
- // Do some database activity to get something into the log.
- Db db1 = new Db(env, 0);
- db1.open(null, "first.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
- db1.put(null, new Dbt("a".getBytes()), new Dbt("b".getBytes()), 0);
- db1.put(null, new Dbt("c".getBytes()), new Dbt("d".getBytes()), 0);
- db1.close(0);
-
- Db db2 = new Db(env, 0);
- db2.open(null, "second.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
- db2.put(null, new Dbt("w".getBytes()), new Dbt("x".getBytes()), 0);
- db2.put(null, new Dbt("y".getBytes()), new Dbt("z".getBytes()), 0);
- db2.close(0);
-
- // Now get a log cursor and walk through.
- DbLogc logc = env.log_cursor(0);
-
- int ret = 0;
- DbLsn lsn = new DbLsn();
- Dbt dbt = new Dbt();
- int flags = Db.DB_FIRST;
-
- int count = 0;
- while ((ret = logc.get(lsn, dbt, flags)) == 0) {
-
- // We ignore the contents of the log record;
- // it's not portable. Even the exact count
- // may change when the underlying implementation
- // changes, so we'll just make sure at the end
- // that we saw 'enough'.
- //
- // System.out.println("logc.get: " + count);
- // System.out.println(showDbt(dbt));
- //
- count++;
- flags = Db.DB_NEXT;
- }
- if (ret != Db.DB_NOTFOUND) {
- System.err.println("*** FAIL: logc.get returned: " +
- DbEnv.strerror(ret));
- }
- logc.close(0);
-
- // There have to be at *least* four log records,
- // since we did four separate database operations.
- //
- if (count < 4)
- System.out.println("*** FAIL: not enough log records");
-
- System.out.println("TestLogc done.");
- }
- catch (DbException dbe) {
- System.err.println("*** FAIL: Db Exception: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- System.err.println("*** FAIL: FileNotFoundException: " + fnfe);
- }
-
- }
-
- public static String showDbt(Dbt dbt)
- {
- StringBuffer sb = new StringBuffer();
- int size = dbt.get_size();
- byte[] data = dbt.get_data();
- int i;
- for (i=0; i<size && i<10; i++) {
- sb.append(Byte.toString(data[i]));
- sb.append(' ');
- }
- if (i<size)
- sb.append("...");
- return "size: " + size + " data: " + sb.toString();
- }
-}
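
The log-cursor idiom being tested is a DB_FIRST read followed by DB_NEXT
reads until DB_NOTFOUND. Distilled into a standalone sketch against the
same legacy API (a fresh environment may legitimately report zero records):

    package com.sleepycat.test;

    import com.sleepycat.db.*;
    import java.io.FileNotFoundException;

    public class LogWalkSketch
    {
        public static void main(String[] args)
        {
            try {
                DbEnv env = new DbEnv(0);
                env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG |
                         Db.DB_INIT_MPOOL, 0);

                DbLogc logc = env.log_cursor(0);
                DbLsn lsn = new DbLsn();
                Dbt rec = new Dbt();

                // get() returns 0 for each record and DB_NOTFOUND
                // when the log is exhausted.
                int ret, count = 0;
                int flags = Db.DB_FIRST;
                while ((ret = logc.get(lsn, rec, flags)) == 0) {
                    count++;
                    flags = Db.DB_NEXT;
                }
                if (ret != Db.DB_NOTFOUND)
                    System.err.println("log walk stopped: " +
                                       DbEnv.strerror(ret));
                System.out.println("records seen: " + count);
                logc.close(0);
                env.close(0);
            }
            catch (DbException dbe) {
                System.err.println("Db Exception: " + dbe);
            }
            catch (FileNotFoundException fnfe) {
                System.err.println("FileNotFoundException: " + fnfe);
            }
        }
    }
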
diff --git a/storage/bdb/test/scr016/TestLogc.testout b/storage/bdb/test/scr016/TestLogc.testout
deleted file mode 100644
index afac3af7eda..00000000000
--- a/storage/bdb/test/scr016/TestLogc.testout
+++ /dev/null
@@ -1 +0,0 @@
-TestLogc done.
diff --git a/storage/bdb/test/scr016/TestOpenEmpty.java b/storage/bdb/test/scr016/TestOpenEmpty.java
deleted file mode 100644
index ab5fb5a8d92..00000000000
--- a/storage/bdb/test/scr016/TestOpenEmpty.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestOpenEmpty.java,v 1.8 2004/01/28 03:36:34 bostic Exp $
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.InputStreamReader;
-import java.io.IOException;
-import java.io.PrintStream;
-
-public class TestOpenEmpty
-{
- private static final String FileName = "access.db";
-
- public TestOpenEmpty()
- {
- }
-
- private static void usage()
- {
- System.err.println("usage: TestOpenEmpty\n");
- System.exit(1);
- }
-
- public static void main(String argv[])
- {
- try
- {
- TestOpenEmpty app = new TestOpenEmpty();
- app.run();
- }
- catch (DbException dbe)
- {
- System.err.println("TestOpenEmpty: " + dbe.toString());
- System.exit(1);
- }
- catch (FileNotFoundException fnfe)
- {
- System.err.println("TestOpenEmpty: " + fnfe.toString());
- System.exit(1);
- }
- catch (Exception ex)
- {
- System.err.println("TestOpenEmpty: " + ex.toString());
- System.exit(1);
- }
- System.exit(0);
- }
-
- // Prompts for a line, and keeps prompting until a non-blank
- // line is returned. Returns null on error.
- //
- static public String askForLine(InputStreamReader reader,
- PrintStream out, String prompt)
- {
- String result = "";
- while (result != null && result.length() == 0) {
- out.print(prompt);
- out.flush();
- result = getLine(reader);
- }
- return result;
- }
-
- // Not terribly efficient, but does the job.
- // Works for reading a line from stdin or a file.
- // Returns null on EOF. If EOF appears in the middle
- // of a line, returns that line, then null on next call.
- //
- static public String getLine(InputStreamReader reader)
- {
- StringBuffer b = new StringBuffer();
- int c;
- try {
- while ((c = reader.read()) != -1 && c != '\n') {
- if (c != '\r')
- b.append((char)c);
- }
- }
- catch (IOException ioe) {
- c = -1;
- }
-
- if (c == -1 && b.length() == 0)
- return null;
- else
- return b.toString();
- }
-
- public void run()
- throws DbException, FileNotFoundException
- {
- // Remove the previous database.
- new File(FileName).delete();
-
- try {
- FileOutputStream os = new FileOutputStream(FileName);
- os.write("abc".getBytes());
- os.close();
- }
- catch (IOException ioe) { } // ignored; the file only needs to exist
-
- // Create the database object.
- // There is no environment for this simple example.
- Db table = new Db(null, 0);
- table.setErrorStream(System.err);
- table.setErrorPrefix("TestOpenEmpty");
- table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
-
- //
- // Insert records into the database, where the key is the user
- // input and the data is the user input in reverse order.
- //
- InputStreamReader reader = new InputStreamReader(System.in);
-
- for (;;) {
- String line = askForLine(reader, System.out, "input> ");
- if (line == null)
- break;
-
- String reversed = (new StringBuffer(line)).reverse().toString();
-
- // See definition of StringDbt below
- //
- StringDbt key = new StringDbt(line);
- StringDbt data = new StringDbt(reversed);
-
- try
- {
- int err;
- if ((err = table.put(null,
- key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
- System.out.println("Key " + line + " already exists.");
- }
- }
- catch (DbException dbe)
- {
- System.out.println(dbe.toString());
- }
- System.out.println("");
- }
-
- // Acquire an iterator for the table.
- Dbc iterator;
- iterator = table.cursor(null, 0);
-
- // Walk through the table, printing the key/data pairs.
- // See class StringDbt defined below.
- //
- StringDbt key = new StringDbt();
- StringDbt data = new StringDbt();
- while (iterator.get(key, data, Db.DB_NEXT) == 0)
- {
- System.out.println(key.getString() + " : " + data.getString());
- }
- iterator.close();
- table.close(0);
- }
-
- // Here's an example of how you can extend a Dbt in a straightforward
- // way to allow easy storage/retrieval of strings, or whatever
- // kind of data you wish. We've declared it as a static inner
- // class, but it need not be.
- //
- static /*inner*/
- class StringDbt extends Dbt
- {
- StringDbt()
- {
- setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- StringDbt(String value)
- {
- setString(value);
- setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- void setString(String value)
- {
- setData(value.getBytes());
- setSize(value.length());
- }
-
- String getString()
- {
- return new String(getData(), 0, getSize());
- }
- }
-}
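
What this test pins down is that Db.open rejects a file that exists but is
not a Berkeley DB database, even with DB_CREATE set; the recorded .testerr
below shows the resulting "unexpected file type or format" error. The same
scenario, compressed (the file name is illustrative):

    package com.sleepycat.test;

    import com.sleepycat.db.*;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class OpenGarbageSketch
    {
        public static void main(String[] args)
        {
            try {
                // A few bytes that are not a valid database header.
                FileOutputStream os = new FileOutputStream("garbage.db");
                os.write("abc".getBytes());
                os.close();
            }
            catch (IOException ioe) {
                System.err.println("setup failed: " + ioe);
                return;
            }
            try {
                Db db = new Db(null, 0);
                // DB_CREATE does not help: the file already exists,
                // so this open is expected to fail.
                db.open(null, "garbage.db", null,
                        Db.DB_BTREE, Db.DB_CREATE, 0644);
                System.out.println("unexpected: open succeeded");
                db.close(0);
            }
            catch (Exception e) {
                System.out.println("expected failure: " + e);
            }
        }
    }
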
diff --git a/storage/bdb/test/scr016/TestOpenEmpty.testerr b/storage/bdb/test/scr016/TestOpenEmpty.testerr
deleted file mode 100644
index c08da27b36e..00000000000
--- a/storage/bdb/test/scr016/TestOpenEmpty.testerr
+++ /dev/null
@@ -1,2 +0,0 @@
-TestOpenEmpty: access.db: unexpected file type or format
-TestOpenEmpty: java.lang.IllegalArgumentException: Invalid argument
diff --git a/storage/bdb/test/scr016/TestReplication.java b/storage/bdb/test/scr016/TestReplication.java
deleted file mode 100644
index e20b9a92901..00000000000
--- a/storage/bdb/test/scr016/TestReplication.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestReplication.java,v 1.7 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Simple test of replication, merely to exercise the individual
- * methods in the API. Rather than use TCP/IP, our transport
- * mechanism is just a Vector of byte arrays.
- * It's managed like a queue; access is synchronized
- * on the Vector object itself, with the client polling it.
- * It's not terribly extensible, but it's fine for a small test.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Vector;
-
-public class TestReplication extends Thread
- implements DbRepTransport
-{
- public static final String MASTER_ENVDIR = "./master";
- public static final String CLIENT_ENVDIR = "./client";
-
- private Vector queue = new Vector();
- private DbEnv master_env;
- private DbEnv client_env;
-
- private static void mkdir(String name)
- throws IOException
- {
- (new File(name)).mkdir();
- }
-
-
- // The client thread runs this
- public void run()
- {
- try {
- System.err.println("c10");
- client_env = new DbEnv(0);
- System.err.println("c11");
- client_env.set_rep_transport(1, this);
- System.err.println("c12");
- client_env.open(CLIENT_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
- System.err.println("c13");
- Dbt myid = new Dbt("master01".getBytes());
- System.err.println("c14");
- client_env.rep_start(myid, Db.DB_REP_CLIENT);
- System.err.println("c15");
- DbEnv.RepProcessMessage processMsg = new DbEnv.RepProcessMessage();
- processMsg.envid = 2;
- System.err.println("c20");
- boolean running = true;
-
- Dbt control = new Dbt();
- Dbt rec = new Dbt();
-
- while (running) {
- int msgtype = 0;
-
- System.err.println("c30");
- synchronized (queue) {
- if (queue.size() == 0) {
- System.err.println("c40");
- sleepShort();
- }
- else {
- msgtype = ((Integer)queue.firstElement()).intValue();
- queue.removeElementAt(0);
- byte[] data;
-
- System.err.println("c50 " + msgtype);
-
- switch (msgtype) {
- case -1:
- running = false;
- break;
- case 1:
- data = (byte[])queue.firstElement();
- queue.removeElementAt(0);
- control.set_data(data);
- control.set_size(data.length);
- break;
- case 2:
- control.set_data(null);
- control.set_size(0);
- break;
- case 3:
- data = (byte[])queue.firstElement();
- queue.removeElementAt(0);
- rec.set_data(data);
- rec.set_size(data.length);
- break;
- case 4:
- rec.set_data(null);
- rec.set_size(0);
- break;
- }
-
- }
- }
- System.err.println("c60");
- if (msgtype == 3 || msgtype == 4) {
- System.out.println("client: Got message");
- client_env.rep_process_message(control, rec,
- processMsg);
- }
- }
- System.err.println("c70");
- Db db = new Db(client_env, 0);
- db.open(null, "x.db", null, Db.DB_BTREE, 0, 0);
- Dbt data = new Dbt();
- System.err.println("c80");
- db.get(null, new Dbt("Hello".getBytes()), data, 0);
- System.err.println("c90");
- System.out.println("Hello " + new String(data.get_data(), data.get_offset(), data.get_size()));
- System.err.println("c95");
- client_env.close(0);
- }
- catch (Exception e) {
- System.err.println("client exception: " + e);
- }
- }
-
- // Implements DbTransport
- public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
- throws DbException
- {
- System.out.println("Send to " + envid);
- if (envid == 1) {
- System.err.println("Unexpected envid = " + envid);
- return 0;
- }
-
- int nbytes = 0;
-
- synchronized (queue) {
- System.out.println("Sending message");
- byte[] data = control.get_data();
- if (data != null && data.length > 0) {
- queue.addElement(new Integer(1));
- nbytes += data.length;
- byte[] newdata = new byte[data.length];
- System.arraycopy(data, 0, newdata, 0, data.length);
- queue.addElement(newdata);
- }
- else
- {
- queue.addElement(new Integer(2));
- }
-
- data = rec.get_data();
- if (data != null && data.length > 0) {
- queue.addElement(new Integer(3));
- nbytes += data.length;
- byte[] newdata = new byte[data.length];
- System.arraycopy(data, 0, newdata, 0, data.length);
- queue.addElement(newdata);
- }
- else
- {
- queue.addElement(new Integer(4));
- }
- System.out.println("MASTER: sent message");
- }
- return 0;
- }
-
- public void sleepShort()
- {
- try {
- sleep(100);
- }
- catch (InterruptedException ie)
- {
- }
- }
-
- public void send_terminator()
- {
- synchronized (queue) {
- queue.addElement(new Integer(-1));
- }
- }
-
- public void master()
- {
- try {
- master_env = new DbEnv(0);
- master_env.set_rep_transport(2, this);
- master_env.open(MASTER_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0644);
- System.err.println("10");
- Dbt myid = new Dbt("client01".getBytes());
- master_env.rep_start(myid, Db.DB_REP_MASTER);
- System.err.println("10");
- Db db = new Db(master_env, 0);
- System.err.println("20");
- db.open(null, "x.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
- System.err.println("30");
- db.put(null, new Dbt("Hello".getBytes()),
- new Dbt("world".getBytes()), 0);
- System.err.println("40");
- //DbEnv.RepElectResult electionResult = new DbEnv.RepElectResult();
- //master_env.rep_elect(2, 2, 3, 4, electionResult);
- db.close(0);
- System.err.println("50");
- master_env.close(0);
- send_terminator();
- }
- catch (Exception e) {
- System.err.println("client exception: " + e);
- }
- }
-
- public static void main(String[] args)
- {
- // The test should only take a few milliseconds.
- // Give it 10 seconds before bailing out.
- TimelimitThread t = new TimelimitThread(1000*10);
- t.start();
-
- try {
- mkdir(CLIENT_ENVDIR);
- mkdir(MASTER_ENVDIR);
-
- TestReplication rep = new TestReplication();
-
- // Run the client as a separate thread.
- rep.start();
-
- // Run the master.
- rep.master();
-
- // Wait for the master to finish.
- rep.join();
- }
- catch (Exception e)
- {
- System.err.println("Exception: " + e);
- }
- t.finished();
- }
-
-}
-
-class TimelimitThread extends Thread
-{
- long nmillis;
- boolean finished = false;
-
- TimelimitThread(long nmillis)
- {
- this.nmillis = nmillis;
- }
-
- public void finished()
- {
- finished = true;
- }
-
- public void run()
- {
- long targetTime = System.currentTimeMillis() + nmillis;
- long curTime;
-
- while (!finished &&
- ((curTime = System.currentTimeMillis()) < targetTime)) {
- long diff = targetTime - curTime;
- if (diff > 100)
- diff = 100;
- try {
- sleep(diff);
- }
- catch (InterruptedException ie) {
- }
- }
- System.err.println("");
- System.exit(1);
- }
-}
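
The replication machinery above hinges on the DbRepTransport callback:
DbEnv.set_rep_transport registers an object whose send() must deliver the
control and rec buffers to the peer identified by envid. A skeleton of the
interface as the test uses it (a real transport would copy the byte arrays
and forward them to the peer's rep_process_message):

    package com.sleepycat.test;

    import com.sleepycat.db.*;

    public class SkeletonTransport implements DbRepTransport
    {
        public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
            throws DbException
        {
            byte[] c = control.get_data(); // replication metadata, may be null
            byte[] r = rec.get_data();     // log payload, may be null
            // ... hand copies of (c, r) to the environment `envid` ...
            return 0;                      // nonzero signals a send failure
        }
    }
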
diff --git a/storage/bdb/test/scr016/TestRpcServer.java b/storage/bdb/test/scr016/TestRpcServer.java
deleted file mode 100644
index ee040f0cfd6..00000000000
--- a/storage/bdb/test/scr016/TestRpcServer.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestRpcServer.java,v 1.5 2004/01/28 03:36:34 bostic Exp $
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.Reader;
-import java.io.StringReader;
-import java.io.IOException;
-import java.io.PrintStream;
-
-public class TestRpcServer
-{
- private static final String FileName = "access.db";
-
- public TestRpcServer()
- {
- }
-
- private static void usage()
- {
- System.err.println("usage: TestRpcServer\n");
- System.exit(1);
- }
-
- public static void main(String argv[])
- {
- try
- {
- TestRpcServer app = new TestRpcServer();
- app.run();
- }
- catch (DbException dbe)
- {
- System.err.println("TestRpcServer: " + dbe.toString());
- System.exit(1);
- }
- catch (FileNotFoundException fnfe)
- {
- System.err.println("TestRpcServer: " + fnfe.toString());
- System.exit(1);
- }
- System.exit(0);
- }
-
- // Prompts for a line, and keeps prompting until a non-blank
- // line is returned. Returns null on error.
- //
- static public String askForLine(Reader reader,
- PrintStream out, String prompt)
- {
- String result = "";
- while (result != null && result.length() == 0) {
- out.print(prompt);
- out.flush();
- result = getLine(reader);
- }
- return result;
- }
-
- // Not terribly efficient, but does the job.
- // Works for reading a line from stdin or a file.
- // Returns null on EOF. If EOF appears in the middle
- // of a line, returns that line, then null on next call.
- //
- static public String getLine(Reader reader)
- {
- StringBuffer b = new StringBuffer();
- int c;
- try {
- while ((c = reader.read()) != -1 && c != '\n') {
- if (c != '\r')
- b.append((char)c);
- }
- }
- catch (IOException ioe) {
- c = -1;
- }
-
- if (c == -1 && b.length() == 0)
- return null;
- else
- return b.toString();
- }
-
- public void run()
- throws DbException, FileNotFoundException
- {
- // Remove the previous database.
- new File(FileName).delete();
-
- DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
- dbenv.set_rpc_server(null, "localhost", 0, 0, 0);
- dbenv.open(".", Db.DB_CREATE, 0644);
- System.out.println("server connection set");
-
- // Create the database object.
- // Use the RPC client environment created above.
- Db table = new Db(dbenv, 0);
- table.set_error_stream(System.err);
- table.set_errpfx("TestRpcServer");
- table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
-
- //
- // Insert records into the database, where the key is the user
- // input and the data is the user input in reverse order.
- //
- Reader reader =
- new StringReader("abc\nStuff\nmore Stuff\nlast line\n");
-
- for (;;) {
- String line = askForLine(reader, System.out, "input> ");
- if (line == null)
- break;
-
- String reversed = (new StringBuffer(line)).reverse().toString();
-
- // See definition of StringDbt below
- //
- StringDbt key = new StringDbt(line);
- StringDbt data = new StringDbt(reversed);
-
- try
- {
- int err;
- if ((err = table.put(null,
- key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
- System.out.println("Key " + line + " already exists.");
- }
- }
- catch (DbException dbe)
- {
- System.out.println(dbe.toString());
- }
- System.out.println("");
- }
-
- // Acquire an iterator for the table.
- Dbc iterator;
- iterator = table.cursor(null, 0);
-
- // Walk through the table, printing the key/data pairs.
- // See class StringDbt defined below.
- //
- StringDbt key = new StringDbt();
- StringDbt data = new StringDbt();
- while (iterator.get(key, data, Db.DB_NEXT) == 0)
- {
- System.out.println(key.getString() + " : " + data.getString());
- }
- iterator.close();
- table.close(0);
- }
-
- // Here's an example of how you can extend a Dbt in a straightforward
- // way to allow easy storage/retrieval of strings, or whatever
- // kind of data you wish. We've declared it as a static inner
- // class, but it need not be.
- //
- static /*inner*/
- class StringDbt extends Dbt
- {
- StringDbt()
- {
- set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- StringDbt(String value)
- {
- setString(value);
- set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
- }
-
- void setString(String value)
- {
- set_data(value.getBytes());
- set_size(value.length());
- }
-
- String getString()
- {
- return new String(get_data(), 0, get_size());
- }
- }
-}
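
The RPC variant differs from the sibling tests only in environment setup:
the DbEnv is created with DB_CLIENT and pointed at a server with
set_rpc_server, after which ordinary Db calls are forwarded remotely (note
this file uses the older set_/get_ method names and the txn-less open
signature). A minimal client-side sketch; the host and database name are
illustrative, and a matching RPC server is assumed to be running:

    package com.sleepycat.test;

    import com.sleepycat.db.*;
    import java.io.FileNotFoundException;

    public class RpcClientSketch
    {
        public static void main(String[] args)
        {
            try {
                // DB_CLIENT turns this environment into an RPC proxy.
                DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
                dbenv.set_rpc_server(null, "localhost", 0, 0, 0);
                dbenv.open(".", Db.DB_CREATE, 0644);

                Db db = new Db(dbenv, 0);
                db.open("remote.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
                db.put(null, new Dbt("k".getBytes()),
                       new Dbt("v".getBytes()), 0);
                db.close(0);
                dbenv.close(0);
            }
            catch (DbException dbe) {
                System.err.println("Db Exception: " + dbe);
            }
            catch (FileNotFoundException fnfe) {
                System.err.println("FileNotFoundException: " + fnfe);
            }
        }
    }
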
diff --git a/storage/bdb/test/scr016/TestSameDbt.java b/storage/bdb/test/scr016/TestSameDbt.java
deleted file mode 100644
index 28a46af31e9..00000000000
--- a/storage/bdb/test/scr016/TestSameDbt.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestSameDbt.java,v 1.6 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Simple test of reusing a single Dbt for both key and data in one call.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestSameDbt
-{
- public static void main(String[] args)
- {
- try {
- Db db = new Db(null, 0);
- db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
-
- // try reusing the dbt
- Dbt keydatadbt = new Dbt("stuff".getBytes());
- int gotexcept = 0;
-
- try {
- db.put(null, keydatadbt, keydatadbt, 0);
- }
- catch (DbException dbe) {
- System.out.println("got expected Db Exception: " + dbe);
- gotexcept++;
- }
-
- if (gotexcept != 1) {
- System.err.println("Missed exception");
- System.out.println("** FAIL **");
- }
- else {
- System.out.println("Test succeeded.");
- }
- }
- catch (DbException dbe) {
- System.err.println("Db Exception: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- System.err.println("FileNotFoundException: " + fnfe);
- }
-
- }
-
-}
diff --git a/storage/bdb/test/scr016/TestSameDbt.testout b/storage/bdb/test/scr016/TestSameDbt.testout
deleted file mode 100644
index be4bbbe59e9..00000000000
--- a/storage/bdb/test/scr016/TestSameDbt.testout
+++ /dev/null
@@ -1,2 +0,0 @@
-got expected Db Exception: com.sleepycat.db.DbException: Dbt is already in use
-Test succeeded.
diff --git a/storage/bdb/test/scr016/TestSimpleAccess.java b/storage/bdb/test/scr016/TestSimpleAccess.java
deleted file mode 100644
index 72886b5d240..00000000000
--- a/storage/bdb/test/scr016/TestSimpleAccess.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestSimpleAccess.java,v 1.7 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Simple test for get/put of specific values.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestSimpleAccess
-{
- public static void main(String[] args)
- {
- try {
- Db db = new Db(null, 0);
- db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
-
- TestUtil.populate(db);
- System.out.println("finished test");
- }
- catch (DbException dbe) {
- System.err.println("Db Exception: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- System.err.println("FileNotFoundException: " + fnfe);
- }
- }
-}
diff --git a/storage/bdb/test/scr016/TestSimpleAccess.testout b/storage/bdb/test/scr016/TestSimpleAccess.testout
deleted file mode 100644
index dc88d4788e4..00000000000
--- a/storage/bdb/test/scr016/TestSimpleAccess.testout
+++ /dev/null
@@ -1,3 +0,0 @@
-got data: data
-get using bad key: DB_NOTFOUND: No matching key/data pair found
-finished test
diff --git a/storage/bdb/test/scr016/TestStat.java b/storage/bdb/test/scr016/TestStat.java
deleted file mode 100644
index 84add875e04..00000000000
--- a/storage/bdb/test/scr016/TestStat.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestStat.java,v 1.10 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Simple test of the statistics interfaces.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestStat
-{
- public static void main(String[] args)
- {
- int envflags =
- Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
- Db.DB_INIT_REP | Db.DB_INIT_TXN | Db.DB_CREATE;
- try {
- DbEnv dbenv = new DbEnv(0);
- dbenv.open(".", envflags, 0);
-
- // Use a separate environment that has no activity
- // to do the replication stats. We don't want to get
- // into configuring a real replication environment here.
- DbEnv repenv = new DbEnv(0);
- repenv.open(".", envflags, 0);
-
- // Keep a couple of transactions open so the DbTxnStat
- // active array will have some entries.
- DbTxn dbtxn1 = dbenv.txnBegin(null, 0);
- DbTxn dbtxn2 = dbenv.txnBegin(dbtxn1, 0);
- Db db = new Db(dbenv, 0);
- db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
-
- TestUtil.populate(db);
- System.out.println("BtreeStat:");
- DbBtreeStat stat = (DbBtreeStat)db.stat(0);
- System.out.println(" bt_magic: " + stat.bt_magic);
-
- System.out.println("LogStat:");
- DbLogStat logstat = dbenv.logStat(0);
- System.out.println(" st_magic: " + logstat.st_magic);
- System.out.println(" st_cur_file: " + logstat.st_cur_file);
-
- System.out.println("TxnStat:");
- DbTxnStat txnstat = dbenv.txnStat(0);
- System.out.println(" st_ncommits: " + txnstat.st_ncommits);
- System.out.println(" st_nactive: " + txnstat.st_nactive);
-
- DbTxnStat.Active active0 = txnstat.st_txnarray[0];
- DbTxnStat.Active active1 = txnstat.st_txnarray[1];
- if (active0.txnid != active1.parentid &&
- active1.txnid != active0.parentid) {
- System.out.println("Missing PARENT/CHILD txn relationship:");
- System.out.println(" st_active[0].txnid: " + active0.txnid);
- System.out.println(" st_active[0].parentid: " +
- active0.parentid);
- System.out.println(" st_active[1].txnid: " + active1.txnid);
- System.out.println(" st_active[1].parentid: " +
- active1.parentid);
- }
-
- System.out.println("DbMpoolStat:");
- DbMpoolStat mpstat = dbenv.memoryPoolStat(0);
- System.out.println(" st_gbytes: " + mpstat.st_gbytes);
-
- System.out.println("DbMpoolFileStat:");
- DbMpoolFStat[] mpfstat = dbenv.memoryPoolFileStat(0);
- System.out.println(" num files: " + mpfstat.length);
-
- System.out.println("RepStat:");
- DbRepStat repstat = repenv.replicationStat(0);
- System.out.println(" st_status: " + repstat.st_status);
- System.out.println(" st_log_duplication: " +
- repstat.st_log_duplicated);
-
- System.out.println("finished test");
- }
- catch (DbException dbe) {
- System.err.println("Db Exception: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- System.err.println("FileNotFoundException: " + fnfe);
- }
- }
-}
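
One non-obvious detail above: Db.stat(0) returns a generic statistics
object that must be downcast to the access-method-specific class,
DbBtreeStat for a btree, before fields like bt_magic are reachable. Just
that idiom, as a sketch (the database name is illustrative):

    package com.sleepycat.test;

    import com.sleepycat.db.*;
    import java.io.FileNotFoundException;

    public class StatCastSketch
    {
        public static void main(String[] args)
        {
            try {
                Db db = new Db(null, 0);
                db.open(null, "stats.db", null,
                        Db.DB_BTREE, Db.DB_CREATE, 0644);
                db.put(null, new Dbt("k".getBytes()),
                       new Dbt("v".getBytes()), 0);

                // The cast selects the btree flavor of the stats.
                DbBtreeStat stat = (DbBtreeStat)db.stat(0);
                System.out.println("bt_magic: " + stat.bt_magic);
                db.close(0);
            }
            catch (DbException dbe) {
                System.err.println("Db Exception: " + dbe);
            }
            catch (FileNotFoundException fnfe) {
                System.err.println("FileNotFoundException: " + fnfe);
            }
        }
    }
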
diff --git a/storage/bdb/test/scr016/TestStat.testout b/storage/bdb/test/scr016/TestStat.testout
deleted file mode 100644
index bd21aa7638d..00000000000
--- a/storage/bdb/test/scr016/TestStat.testout
+++ /dev/null
@@ -1,18 +0,0 @@
-got data: data
-get using bad key: DB_NOTFOUND: No matching key/data pair found
-BtreeStat:
- bt_magic: 340322
-LogStat:
- st_magic: 264584
- st_cur_file: 1
-TxnStat:
- st_ncommits: 0
- st_nactive: 2
-DbMpoolStat:
- st_gbytes: 0
-DbMpoolFileStat:
- num files: 1
-RepStat:
- st_status: 0
- st_log_duplication: 0
-finished test
diff --git a/storage/bdb/test/scr016/TestTruncate.java b/storage/bdb/test/scr016/TestTruncate.java
deleted file mode 100644
index c84f68db4ee..00000000000
--- a/storage/bdb/test/scr016/TestTruncate.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestTruncate.java,v 1.8 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Simple test of Db.truncate.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestTruncate
-{
- public static void main(String[] args)
- {
- try {
- Db db = new Db(null, 0);
- db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
-
- // populate our massive database.
- Dbt keydbt = new Dbt("key".getBytes());
- Dbt datadbt = new Dbt("data".getBytes());
- db.put(null, keydbt, datadbt, 0);
-
- // Now, retrieve. We could use keydbt over again,
- // but that wouldn't be typical in an application.
- Dbt goodkeydbt = new Dbt("key".getBytes());
- Dbt badkeydbt = new Dbt("badkey".getBytes());
- Dbt resultdbt = new Dbt();
- resultdbt.setFlags(Db.DB_DBT_MALLOC);
-
- int ret;
-
- if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
- System.out.println("get: " + DbEnv.strerror(ret));
- }
- else {
- String result =
- new String(resultdbt.getData(), 0, resultdbt.getSize());
- System.out.println("got data: " + result);
- }
-
- if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
- // We expect this...
- System.out.println("get using bad key: " + DbEnv.strerror(ret));
- }
- else {
- String result =
- new String(resultdbt.getData(), 0, resultdbt.getSize());
- System.out.println("*** got data using bad key!!: " + result);
- }
-
- // Now, truncate and make sure that it's really gone.
- System.out.println("truncating data...");
- int nrecords = db.truncate(null, 0);
- System.out.println("truncate returns " + nrecords);
- if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
- // We expect this...
- System.out.println("after truncate get: " +
- DbEnv.strerror(ret));
- }
- else {
- String result =
- new String(resultdbt.getData(), 0, resultdbt.getSize());
- System.out.println("got data: " + result);
- }
-
- db.close(0);
- System.out.println("finished test");
- }
- catch (DbException dbe) {
- System.err.println("Db Exception: " + dbe);
- }
- catch (FileNotFoundException fnfe) {
- System.err.println("FileNotFoundException: " + fnfe);
- }
-
- }
-
-}
diff --git a/storage/bdb/test/scr016/TestTruncate.testout b/storage/bdb/test/scr016/TestTruncate.testout
deleted file mode 100644
index 0a4bc98165d..00000000000
--- a/storage/bdb/test/scr016/TestTruncate.testout
+++ /dev/null
@@ -1,6 +0,0 @@
-got data: data
-get using bad key: DB_NOTFOUND: No matching key/data pair found
-truncating data...
-truncate returns 1
-after truncate get: DB_NOTFOUND: No matching key/data pair found
-finished test
diff --git a/storage/bdb/test/scr016/TestUtil.java b/storage/bdb/test/scr016/TestUtil.java
deleted file mode 100644
index 799d326cde7..00000000000
--- a/storage/bdb/test/scr016/TestUtil.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestUtil.java,v 1.5 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Utilities used by many tests.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import java.io.FileNotFoundException;
-
-public class TestUtil
-{
- public static void populate(Db db)
- throws DbException
- {
- // populate our massive database.
- Dbt keydbt = new Dbt("key".getBytes());
- Dbt datadbt = new Dbt("data".getBytes());
- db.put(null, keydbt, datadbt, 0);
-
- // Now, retrieve. We could use keydbt over again,
- // but that wouldn't be typical in an application.
- Dbt goodkeydbt = new Dbt("key".getBytes());
- Dbt badkeydbt = new Dbt("badkey".getBytes());
- Dbt resultdbt = new Dbt();
- resultdbt.setFlags(Db.DB_DBT_MALLOC);
-
- int ret;
-
- if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
- System.out.println("get: " + DbEnv.strerror(ret));
- }
- else {
- String result =
- new String(resultdbt.getData(), 0, resultdbt.getSize());
- System.out.println("got data: " + result);
- }
-
- if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
- // We expect this...
- System.out.println("get using bad key: " + DbEnv.strerror(ret));
- }
- else {
- String result =
- new String(resultdbt.getData(), 0, resultdbt.getSize());
- System.out.println("*** got data using bad key!!: " + result);
- }
- }
-}
diff --git a/storage/bdb/test/scr016/TestXAServlet.java b/storage/bdb/test/scr016/TestXAServlet.java
deleted file mode 100644
index 29545b3b93a..00000000000
--- a/storage/bdb/test/scr016/TestXAServlet.java
+++ /dev/null
@@ -1,313 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- * Sleepycat Software. All rights reserved.
- *
- * $Id: TestXAServlet.java,v 1.4 2004/01/28 03:36:34 bostic Exp $
- */
-
-/*
- * Simple test of XA, using WebLogic.
- */
-
-package com.sleepycat.test;
-
-import com.sleepycat.db.*;
-import com.sleepycat.db.xa.*;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.Hashtable;
-import javax.servlet.*;
-import javax.servlet.http.*;
-import javax.transaction.*;
-import javax.transaction.xa.*;
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
-import weblogic.transaction.TxHelper;
-import weblogic.transaction.TransactionManager;
-
-public class TestXAServlet extends HttpServlet
-{
- public static final String ENV_HOME = "TESTXADIR";
- public static final String DEFAULT_URL = "t3://localhost:7001";
- public static String filesep = System.getProperty("file.separator");
-
- private static TransactionManager tm;
- private static DbXAResource xaresource;
- private static boolean initialized = false;
-
- /**
- * Utility to remove files recursively.
- */
- public static void removeRecursive(File f)
- {
- if (f.isDirectory()) {
- String[] sub = f.list();
- for (int i=0; i<sub.length; i++)
- removeRecursive(new File(f, sub[i])); // full path, not just the name
- }
- f.delete();
- }
-
- /**
- * Typically done only once, unless shutdown is invoked. This
- * sets up directories, and removes any work files from previous
- * runs. Also establishes a transaction manager that we'll use
- * for various transactions. Each call opens/creates a new DB
- * environment in our work directory.
- */
- public static synchronized void startup()
- {
- if (initialized)
- return;
-
- try {
- File dir = new File(ENV_HOME);
- removeRecursive(dir);
- dir.mkdirs();
-
- System.out.println("Getting context");
- InitialContext ic = getInitialContext(DEFAULT_URL);
- System.out.println("Creating XAResource");
- xaresource = new DbXAResource(ENV_HOME, 77, 0);
- System.out.println("Registering with transaction manager");
- tm = TxHelper.getTransactionManager();
- tm.registerStaticResource("DbXA", xaresource);
- initialized = true;
- }
- catch (Exception e) {
- System.err.println("Exception: " + e);
- e.printStackTrace();
- }
- }
-
- /**
- * Closes the XA resource manager.
- */
- public static synchronized void shutdown(PrintWriter out)
- throws XAException
- {
- if (!initialized)
- return;
-
- out.println("Closing the resource.");
- xaresource.close(0);
- out.println("Shutdown complete.");
- initialized = false;
- }
-
-
- /**
- * Should be called once per chunk of major activity.
- */
- public void initialize()
- {
- startup();
- }
-
- private static int count = 1;
- private static boolean debugInited = false;
- private Xid bogusXid;
-
- public static synchronized int incrCount()
- {
- return count++;
- }
-
- public void debugSetup(PrintWriter out)
- throws ServletException, IOException
- {
- try {
- Db.load_db();
- }
- catch (Exception e) {
- out.println("got exception during load: " + e);
- System.out.println("got exception during load: " + e);
- }
- out.println("The servlet has been restarted, and Berkeley DB is loaded");
- out.println("<p>If you're debugging, you should now start the debugger and set breakpoints.");
- }
-
- public void doXATransaction(PrintWriter out, String key, String value,
- String operation)
- throws ServletException, IOException
- {
- try {
- int counter = incrCount();
- if (key == null || key.equals(""))
- key = "key" + counter;
- if (value == null || value.equals(""))
- value = "value" + counter;
-
- out.println("Adding (\"" + key + "\", \"" + value + "\")");
-
- System.out.println("XA transaction begin");
- tm.begin();
- System.out.println("getting XA transaction");
- DbXAResource.DbAttach attach = DbXAResource.xa_attach(null, null);
- DbTxn txn = attach.get_txn();
- DbEnv env = attach.get_env();
- Db db = new Db(env, 0);
- db.open(txn, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
- System.out.println("DB put " + key);
- db.put(txn,
- new Dbt(key.getBytes()),
- new Dbt(value.getBytes()),
- 0);
-
- if (operation.equals("rollback")) {
- out.println("<p>ROLLBACK");
- System.out.println("XA transaction rollback");
- tm.rollback();
- System.out.println("XA rollback returned");
-
- // The old db is no good after the rollback
- // since the open was part of the transaction.
- // Get another db for the cursor dump
- //
- db = new Db(env, 0);
- db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
- }
- else {
- out.println("<p>COMMITTED");
- System.out.println("XA transaction commit");
- tm.commit();
- }
-
- // Show the current state of the database.
- Dbc dbc = db.cursor(null, 0);
- Dbt gotkey = new Dbt();
- Dbt gotdata = new Dbt();
-
- out.println("<p>Current database values:");
- while (dbc.get(gotkey, gotdata, Db.DB_NEXT) == 0) {
- out.println("<br> " + getDbtString(gotkey) + " : "
- + getDbtString(gotdata));
- }
- dbc.close();
- db.close(0);
- }
- catch (DbException dbe) {
- System.err.println("Db Exception: " + dbe);
- out.println(" *** Exception received: " + dbe);
- dbe.printStackTrace();
- }
- catch (FileNotFoundException fnfe) {
- System.err.println("FileNotFoundException: " + fnfe);
- out.println(" *** Exception received: " + fnfe);
- fnfe.printStackTrace();
- }
- // Includes SystemException, NotSupportedException, RollbackException
- catch (Exception e) {
- System.err.println("Exception: " + e);
- out.println(" *** Exception received: " + e);
- e.printStackTrace();
- }
- }
-
- private static Xid getBogusXid()
- throws XAException
- {
- return new DbXid(1, "BOGUS_gtrid".getBytes(),
- "BOGUS_bqual".getBytes());
- }
-
- private static String getDbtString(Dbt dbt)
- {
- return new String(dbt.get_data(), 0, dbt.get_size());
- }
-
- /**
- * doGet is called as a result of invoking the servlet.
- */
- public void doGet(HttpServletRequest req, HttpServletResponse resp)
- throws ServletException, IOException
- {
- try {
- resp.setContentType("text/html");
- PrintWriter out = resp.getWriter();
-
- String key = req.getParameter("key");
- String value = req.getParameter("value");
- String operation = req.getParameter("operation");
-
- out.println("<HTML>");
- out.println("<HEAD>");
- out.println("<TITLE>Berkeley DB with XA</TITLE>");
- out.println("</HEAD><BODY>");
- out.println("<a href=\"TestXAServlet" +
- "\">Database put and commit</a><br>");
- out.println("<a href=\"TestXAServlet?operation=rollback" +
- "\">Database put and rollback</a><br>");
- out.println("<a href=\"TestXAServlet?operation=close" +
- "\">Close the XA resource manager</a><br>");
- out.println("<a href=\"TestXAServlet?operation=forget" +
- "\">Forget an operation (bypasses TM)</a><br>");
- out.println("<a href=\"TestXAServlet?operation=prepare" +
- "\">Prepare an operation (bypasses TM)</a><br>");
- out.println("<br>");
-
- if (!debugInited) {
- // Don't initialize XA yet, give the user
- // a chance to attach a debugger if necessary.
- debugSetup(out);
- debugInited = true;
- }
- else {
- initialize();
- if (operation == null)
- operation = "commit";
-
- if (operation.equals("close")) {
- shutdown(out);
- }
- else if (operation.equals("forget")) {
- // A bogus test, we just make sure the API is callable.
- out.println("<p>FORGET");
- System.out.println("XA forget bogus XID (bypass TM)");
- xaresource.forget(getBogusXid());
- }
- else if (operation.equals("prepare")) {
- // A bogus test, we just make sure the API is callable.
- out.println("<p>PREPARE");
- System.out.println("XA prepare bogus XID (bypass TM)");
- xaresource.prepare(getBogusXid());
- }
- else {
- // commit, rollback, prepare, forget
- doXATransaction(out, key, value, operation);
- }
- }
- out.println("</BODY></HTML>");
-
- System.out.println("Finished.");
- }
- // Includes SystemException, NotSupportedException, RollbackException
- catch (Exception e) {
- System.err.println("Exception: " + e);
- e.printStackTrace();
- }
-
- }
-
-
- /**
- * From weblogic's sample code:
- * samples/examples/jta/jmsjdbc/Client.java
- */
- private static InitialContext getInitialContext(String url)
- throws NamingException
- {
- Hashtable env = new Hashtable();
- env.put(Context.INITIAL_CONTEXT_FACTORY,
- "weblogic.jndi.WLInitialContextFactory");
- env.put(Context.PROVIDER_URL, url);
- return new InitialContext(env);
- }
-
-}
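
The heart of the servlet is DbXAResource.xa_attach, which maps the
transaction manager's current XA transaction onto a Berkeley DB DbTxn/DbEnv
pair so that ordinary Db calls join the global transaction. A sketch of
just that step, assuming (as the servlet arranges via WebLogic's TxHelper)
that a DbXAResource has been registered with a JTA transaction manager and
a transaction has already been begun:

    package com.sleepycat.test;

    import com.sleepycat.db.*;
    import com.sleepycat.db.xa.DbXAResource;

    public class XAAttachSketch
    {
        // Must run inside an active XA transaction.
        public static void putUnderXA(String key, String value)
            throws Exception
        {
            DbXAResource.DbAttach attach = DbXAResource.xa_attach(null, null);
            DbTxn txn = attach.get_txn();
            DbEnv env = attach.get_env();

            Db db = new Db(env, 0);
            db.open(txn, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
            db.put(txn, new Dbt(key.getBytes()),
                   new Dbt(value.getBytes()), 0);
            // Commit or rollback is driven by the transaction manager;
            // after a rollback this Db handle is no longer usable,
            // because the open itself was part of the transaction.
        }
    }
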
diff --git a/storage/bdb/test/scr016/chk.javatests b/storage/bdb/test/scr016/chk.javatests
deleted file mode 100644
index 2e2217dc720..00000000000
--- a/storage/bdb/test/scr016/chk.javatests
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.javatests,v 1.6 2003/11/21 02:35:46 bostic Exp $
-#
-# Check to make sure that regression tests for Java run.
-
-TEST_JAVA_SRCDIR=../test/scr016 # must be a relative directory
-JAVA=${JAVA:-java}
-JAVAC=${JAVAC:-javac}
-
-# CLASSPATH is used by javac and java.
-# We use CLASSPATH rather than the -classpath command line option
- # because the latter behaves differently between JDK1.1 and JDK1.2
-export CLASSPATH="./classes:../db.jar"
-export LD_LIBRARY_PATH="../.libs"
-
-
-# All paths must be relative to a subdirectory of the build directory
-LIBS="-L.. -ldb -ldb_cxx"
-CXXFLAGS="-I.. -I../../dbinc"
-
-# Test must be run from a local build directory, not from a test
-# directory.
-cd ..
-[ -f db_config.h ] || {
- echo 'FAIL: chk.javatests must be run from a local build directory.'
- exit 1
-}
-[ -d ../env ] || {
- echo 'FAIL: chk.javatests must be run from a local build directory.'
- exit 1
-}
-version=`sed -e 's/.* \([0-9]*\.[0-9]*\)\..*/\1/' -e q ../README `
-[ -f libdb_java-$version.la ] || make libdb_java-$version.la || {
- echo "FAIL: unable to build libdb_java-$version.la"
- exit 1
-}
-[ -f db.jar ] || make db.jar || {
- echo 'FAIL: unable to build db.jar'
- exit 1
-}
-testnames=`cd $TEST_JAVA_SRCDIR; ls *.java | sed -e 's/\.java$//'`
-
-for testname in $testnames; do
- if grep -x $testname $TEST_JAVA_SRCDIR/ignore > /dev/null; then
- echo " **** java test $testname ignored"
- continue
- fi
-
- echo " ==== java test $testname"
- rm -rf TESTJAVA; mkdir -p TESTJAVA/classes
- cd ./TESTJAVA
- testprefix=../$TEST_JAVA_SRCDIR/$testname
- ${JAVAC} -d ./classes $testprefix.java ../$TEST_JAVA_SRCDIR/TestUtil.java > ../$testname.compileout 2>&1 || {
- echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
- exit 1
- }
- rm -f ../$testname.compileout
- infile=$testprefix.testin
- [ -f $infile ] || infile=/dev/null
- goodoutfile=$testprefix.testout
- [ -f $goodoutfile ] || goodoutfile=/dev/null
- gooderrfile=$testprefix.testerr
- [ -f $gooderrfile ] || gooderrfile=/dev/null
- ${JAVA} com.sleepycat.test.$testname <$infile >../$testname.out 2>../$testname.err
- cmp ../$testname.out $goodoutfile > /dev/null || {
- echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
- exit 1
- }
- cmp ../$testname.err $gooderrfile > /dev/null || {
- echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
- exit 1
- }
- cd ..
- rm -f $testname.err $testname.out
-done
-rm -rf TESTJAVA
-exit 0
diff --git a/storage/bdb/test/scr016/ignore b/storage/bdb/test/scr016/ignore
deleted file mode 100644
index 03469ecdc51..00000000000
--- a/storage/bdb/test/scr016/ignore
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# $Id: ignore,v 1.7 2003/08/07 15:48:03 mjc Exp $
-#
-# A list of tests to ignore
-
-# These tests are not debugged
-TestRpcServer
-TestReplication
-
-# These are currently not working
-TestAppendRecno
-TestAssociate
-TestLogc
-TestConstruct01
-TestConstruct02
-
-# We no longer check to see that a Dbt is used more than
-# once simultaneously. It's no longer a disastrous error.
-TestSameDbt
-
-# TestUtil is used by the other tests, it does not stand on its own
-TestUtil
-
-# XA needs a special installation, it is not part of testall
-TestXAServlet
diff --git a/storage/bdb/test/scr016/testall b/storage/bdb/test/scr016/testall
deleted file mode 100644
index a4e1b5a8c70..00000000000
--- a/storage/bdb/test/scr016/testall
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh -
-# $Id: testall,v 1.4 2001/09/13 14:49:37 dda Exp $
-#
-# Run all the Java regression tests
-
-ecode=0
-prefixarg=""
-stdinarg=""
-while :
-do
- case "$1" in
- --prefix=* )
- prefixarg="$1"; shift;;
- --stdin )
- stdinarg="$1"; shift;;
- * )
- break
- esac
-done
-files="`find . -name \*.java -print`"
-for file in $files; do
- name=`echo $file | sed -e 's:^\./::' -e 's/\.java$//'`
- if grep $name ignore > /dev/null; then
- echo " **** java test $name ignored"
- else
- echo " ==== java test $name"
- if ! sh ./testone $prefixarg $stdinarg $name; then
- ecode=1
- fi
- fi
-done
-exit $ecode
diff --git a/storage/bdb/test/scr016/testone b/storage/bdb/test/scr016/testone
deleted file mode 100644
index ff654da3af8..00000000000
--- a/storage/bdb/test/scr016/testone
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/bin/sh -
-# $Id: testone,v 1.7 2003/05/06 17:09:44 dda Exp $
-#
-# Run just one Java regression test, the single argument
-# is the classname within this package.
-
-error()
-{
- echo '' >&2
- echo "Java regression error: $@" >&2
- echo '' >&2
- ecode=1
-}
-
-# compares the result against the good version,
-# reports differences, and removes the result file
-# if there are no differences.
-#
-compare_result()
-{
- good="$1"
- latest="$2"
- if [ ! -e "$good" ]; then
- echo "Note: $good does not exist"
- return
- fi
- tmpout=/tmp/blddb$$.tmp
- diff "$good" "$latest" > $tmpout
- if [ -s $tmpout ]; then
- nbad=`grep '^[0-9]' $tmpout | wc -l`
- error "$good and $latest differ in $nbad places."
- else
- rm $latest
- fi
- rm -f $tmpout
-}
-
-ecode=0
-stdinflag=n
-JAVA=${JAVA:-java}
-JAVAC=${JAVAC:-javac}
-
-# classdir is relative to TESTDIR subdirectory
-classdir=./classes
-
-# CLASSPATH is used by javac and java.
-# We use CLASSPATH rather than the -classpath command line option
- # because the latter behaves differently between JDK1.1 and JDK1.2
-export CLASSPATH="$classdir:$CLASSPATH"
-
-# determine the prefix of the install tree
-prefix=""
-while :
-do
- case "$1" in
- --prefix=* )
- prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
- export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
- export CLASSPATH="$prefix/lib/db.jar:$CLASSPATH"
- ;;
- --stdin )
- stdinflag=y; shift
- ;;
- * )
- break
- ;;
- esac
-done
-
-if [ "$#" = 0 ]; then
- echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
- exit 1
-fi
-name="$1"
-
-# class must be public
-if ! grep "public.*class.*$name" $name.java > /dev/null; then
- error "public class $name is not declared in file $name.java"
- exit 1
-fi
-
-# compile
-rm -rf TESTDIR; mkdir TESTDIR
-cd ./TESTDIR
-mkdir -p $classdir
-${JAVAC} -deprecation -d $classdir ../$name.java ../TestUtil.java > ../$name.compileout 2>&1
-ERR=$?
-if [ $ERR != 0 -o -s ../$name.compileout ]; then
- error "compilation of $name failed, see $name.compileout"
- if [ "$ERR" != 0 ]; then
- exit 1
- fi
-else
- rm -f ../$name.compileout
-fi
-
-# find input and error file
-infile=../$name.testin
-if [ ! -f $infile ]; then
- infile=/dev/null
-fi
-
-# run and diff results
-rm -rf TESTDIR
-if [ "$stdinflag" = y ]
-then
- ${JAVA} com.sleepycat.test.$name $TEST_ARGS >../$name.out 2>../$name.err
-else
- ${JAVA} com.sleepycat.test.$name $TEST_ARGS <$infile >../$name.out 2>../$name.err
-fi
-cd ..
-
-testerr=$name.testerr
-if [ ! -f $testerr ]; then
- testerr=/dev/null
-fi
-
-testout=$name.testout
-if [ ! -f $testout ]; then
- testout=/dev/null
-fi
-
-compare_result $testout $name.out
-compare_result $testerr $name.err
-rm -rf TESTDIR
-exit $ecode
diff --git a/storage/bdb/test/scr017/O.BH b/storage/bdb/test/scr017/O.BH
deleted file mode 100644
index cd499d38779..00000000000
--- a/storage/bdb/test/scr017/O.BH
+++ /dev/null
@@ -1,196 +0,0 @@
-abc_10_efg
-abc_10_efg
-abc_11_efg
-abc_11_efg
-abc_12_efg
-abc_12_efg
-abc_13_efg
-abc_13_efg
-abc_14_efg
-abc_14_efg
-abc_15_efg
-abc_15_efg
-abc_16_efg
-abc_16_efg
-abc_17_efg
-abc_17_efg
-abc_18_efg
-abc_18_efg
-abc_19_efg
-abc_19_efg
-abc_1_efg
-abc_1_efg
-abc_20_efg
-abc_20_efg
-abc_21_efg
-abc_21_efg
-abc_22_efg
-abc_22_efg
-abc_23_efg
-abc_23_efg
-abc_24_efg
-abc_24_efg
-abc_25_efg
-abc_25_efg
-abc_26_efg
-abc_26_efg
-abc_27_efg
-abc_27_efg
-abc_28_efg
-abc_28_efg
-abc_29_efg
-abc_29_efg
-abc_2_efg
-abc_2_efg
-abc_30_efg
-abc_30_efg
-abc_31_efg
-abc_31_efg
-abc_32_efg
-abc_32_efg
-abc_33_efg
-abc_33_efg
-abc_34_efg
-abc_34_efg
-abc_36_efg
-abc_36_efg
-abc_37_efg
-abc_37_efg
-abc_38_efg
-abc_38_efg
-abc_39_efg
-abc_39_efg
-abc_3_efg
-abc_3_efg
-abc_40_efg
-abc_40_efg
-abc_41_efg
-abc_41_efg
-abc_42_efg
-abc_42_efg
-abc_43_efg
-abc_43_efg
-abc_44_efg
-abc_44_efg
-abc_45_efg
-abc_45_efg
-abc_46_efg
-abc_46_efg
-abc_47_efg
-abc_47_efg
-abc_48_efg
-abc_48_efg
-abc_49_efg
-abc_49_efg
-abc_4_efg
-abc_4_efg
-abc_50_efg
-abc_50_efg
-abc_51_efg
-abc_51_efg
-abc_52_efg
-abc_52_efg
-abc_53_efg
-abc_53_efg
-abc_54_efg
-abc_54_efg
-abc_55_efg
-abc_55_efg
-abc_56_efg
-abc_56_efg
-abc_57_efg
-abc_57_efg
-abc_58_efg
-abc_58_efg
-abc_59_efg
-abc_59_efg
-abc_5_efg
-abc_5_efg
-abc_60_efg
-abc_60_efg
-abc_61_efg
-abc_61_efg
-abc_62_efg
-abc_62_efg
-abc_63_efg
-abc_63_efg
-abc_64_efg
-abc_64_efg
-abc_65_efg
-abc_65_efg
-abc_66_efg
-abc_66_efg
-abc_67_efg
-abc_67_efg
-abc_68_efg
-abc_68_efg
-abc_69_efg
-abc_69_efg
-abc_6_efg
-abc_6_efg
-abc_70_efg
-abc_70_efg
-abc_71_efg
-abc_71_efg
-abc_72_efg
-abc_72_efg
-abc_73_efg
-abc_73_efg
-abc_74_efg
-abc_74_efg
-abc_75_efg
-abc_75_efg
-abc_76_efg
-abc_76_efg
-abc_77_efg
-abc_77_efg
-abc_78_efg
-abc_78_efg
-abc_79_efg
-abc_79_efg
-abc_7_efg
-abc_7_efg
-abc_80_efg
-abc_80_efg
-abc_81_efg
-abc_81_efg
-abc_82_efg
-abc_82_efg
-abc_83_efg
-abc_83_efg
-abc_84_efg
-abc_84_efg
-abc_85_efg
-abc_85_efg
-abc_86_efg
-abc_86_efg
-abc_87_efg
-abc_87_efg
-abc_88_efg
-abc_88_efg
-abc_89_efg
-abc_89_efg
-abc_8_efg
-abc_8_efg
-abc_90_efg
-abc_90_efg
-abc_91_efg
-abc_91_efg
-abc_92_efg
-abc_92_efg
-abc_93_efg
-abc_93_efg
-abc_94_efg
-abc_94_efg
-abc_95_efg
-abc_95_efg
-abc_96_efg
-abc_96_efg
-abc_97_efg
-abc_97_efg
-abc_98_efg
-abc_98_efg
-abc_99_efg
-abc_99_efg
-abc_9_efg
-abc_9_efg
diff --git a/storage/bdb/test/scr017/O.R b/storage/bdb/test/scr017/O.R
deleted file mode 100644
index d78a04727d8..00000000000
--- a/storage/bdb/test/scr017/O.R
+++ /dev/null
@@ -1,196 +0,0 @@
-1
-abc_1_efg
-2
-abc_2_efg
-3
-abc_3_efg
-4
-abc_4_efg
-5
-abc_5_efg
-6
-abc_6_efg
-7
-abc_7_efg
-8
-abc_8_efg
-9
-abc_9_efg
-10
-abc_10_efg
-11
-abc_11_efg
-12
-abc_12_efg
-13
-abc_13_efg
-14
-abc_14_efg
-15
-abc_15_efg
-16
-abc_16_efg
-17
-abc_17_efg
-18
-abc_18_efg
-19
-abc_19_efg
-20
-abc_20_efg
-21
-abc_21_efg
-22
-abc_22_efg
-23
-abc_23_efg
-24
-abc_24_efg
-25
-abc_25_efg
-26
-abc_26_efg
-27
-abc_27_efg
-28
-abc_28_efg
-29
-abc_29_efg
-30
-abc_30_efg
-31
-abc_31_efg
-32
-abc_32_efg
-33
-abc_33_efg
-34
-abc_34_efg
-35
-abc_36_efg
-36
-abc_37_efg
-37
-abc_38_efg
-38
-abc_39_efg
-39
-abc_40_efg
-40
-abc_41_efg
-41
-abc_42_efg
-42
-abc_43_efg
-43
-abc_44_efg
-44
-abc_45_efg
-45
-abc_46_efg
-46
-abc_47_efg
-47
-abc_48_efg
-48
-abc_49_efg
-49
-abc_50_efg
-50
-abc_51_efg
-51
-abc_52_efg
-52
-abc_53_efg
-53
-abc_54_efg
-54
-abc_55_efg
-55
-abc_56_efg
-56
-abc_57_efg
-57
-abc_58_efg
-58
-abc_59_efg
-59
-abc_60_efg
-60
-abc_61_efg
-61
-abc_62_efg
-62
-abc_63_efg
-63
-abc_64_efg
-64
-abc_65_efg
-65
-abc_66_efg
-66
-abc_67_efg
-67
-abc_68_efg
-68
-abc_69_efg
-69
-abc_70_efg
-70
-abc_71_efg
-71
-abc_72_efg
-72
-abc_73_efg
-73
-abc_74_efg
-74
-abc_75_efg
-75
-abc_76_efg
-76
-abc_77_efg
-77
-abc_78_efg
-78
-abc_79_efg
-79
-abc_80_efg
-80
-abc_81_efg
-81
-abc_82_efg
-82
-abc_83_efg
-83
-abc_84_efg
-84
-abc_85_efg
-85
-abc_86_efg
-86
-abc_87_efg
-87
-abc_88_efg
-88
-abc_89_efg
-89
-abc_90_efg
-90
-abc_91_efg
-91
-abc_92_efg
-92
-abc_93_efg
-93
-abc_94_efg
-94
-abc_95_efg
-95
-abc_96_efg
-96
-abc_97_efg
-97
-abc_98_efg
-98
-abc_99_efg
diff --git a/storage/bdb/test/scr017/chk.db185 b/storage/bdb/test/scr017/chk.db185
deleted file mode 100644
index e6bfef84562..00000000000
--- a/storage/bdb/test/scr017/chk.db185
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.db185,v 1.3 2003/09/30 19:31:36 bostic Exp $
-#
-# Check to make sure we can run DB 1.85 code.
-
-[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
- echo 'FAIL: unable to find or build libdb.a'
- exit 1
-}
-
-if cc -g -Wall -I.. t.c ../libdb.a -o t; then
- :
-else
- echo "FAIL: unable to compile test program t.c"
- exit 1
-fi
-
-if ./t; then
- :
-else
- echo "FAIL: test program failed"
- exit 1
-fi
-
-# Clean up so the next test doesn't get confused.
-rm -rf *
-
-exit 0
diff --git a/storage/bdb/test/scr017/t.c b/storage/bdb/test/scr017/t.c
deleted file mode 100644
index f03b33880d6..00000000000
--- a/storage/bdb/test/scr017/t.c
+++ /dev/null
@@ -1,188 +0,0 @@
-#include <sys/types.h>
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "db_185.h"
-
-void err(char *);
-int mycmp(const DBT *, const DBT *);
-void ops(DB *, int);
-
-int
-main()
-{
- DB *dbp;
- HASHINFO h_info;
- BTREEINFO b_info;
- RECNOINFO r_info;
-
- printf("\tBtree...\n");
- memset(&b_info, 0, sizeof(b_info));
- b_info.flags = R_DUP;
- b_info.cachesize = 100 * 1024;
- b_info.psize = 512;
- b_info.lorder = 4321;
- b_info.compare = mycmp;
- (void)remove("a.db");
- if ((dbp =
- dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL)
- err("dbopen: btree");
- ops(dbp, DB_BTREE);
-
- printf("\tHash...\n");
- memset(&h_info, 0, sizeof(h_info));
- h_info.bsize = 512;
- h_info.ffactor = 6;
- h_info.nelem = 1000;
- h_info.cachesize = 100 * 1024;
- h_info.lorder = 1234;
- (void)remove("a.db");
- if ((dbp =
- dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL)
- err("dbopen: hash");
- ops(dbp, DB_HASH);
-
- printf("\tRecno...\n");
- memset(&r_info, 0, sizeof(r_info));
- r_info.flags = R_FIXEDLEN;
- r_info.cachesize = 100 * 1024;
- r_info.psize = 1024;
- r_info.reclen = 37;
- (void)remove("a.db");
- if ((dbp =
- dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL)
- err("dbopen: recno");
- ops(dbp, DB_RECNO);
-
- return (0);
-}
-
-int
-mycmp(a, b)
- const DBT *a, *b;
-{
- size_t len;
- u_int8_t *p1, *p2;
-
- len = a->size > b->size ? b->size : a->size;
- for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
- if (*p1 != *p2)
- return ((long)*p1 - (long)*p2);
- return ((long)a->size - (long)b->size);
-}
-
-void
-ops(dbp, type)
- DB *dbp;
- int type;
-{
- FILE *outfp;
- DBT key, data;
- recno_t recno;
- int i, ret;
- char buf[64];
-
- memset(&key, 0, sizeof(key));
- memset(&data, 0, sizeof(data));
-
- for (i = 1; i < 100; ++i) { /* Test DB->put. */
- sprintf(buf, "abc_%d_efg", i);
- if (type == DB_RECNO) {
- recno = i;
- key.data = &recno;
- key.size = sizeof(recno);
- } else {
- key.data = data.data = buf;
- key.size = data.size = strlen(buf);
- }
-
- data.data = buf;
- data.size = strlen(buf);
- if (dbp->put(dbp, &key, &data, 0))
- err("DB->put");
- }
-
- if (type == DB_RECNO) { /* Test DB->get. */
- recno = 97;
- key.data = &recno;
- key.size = sizeof(recno);
- } else {
- key.data = buf;
- key.size = strlen(buf);
- }
- sprintf(buf, "abc_%d_efg", 97);
- if (dbp->get(dbp, &key, &data, 0) != 0)
- err("DB->get");
- if (memcmp(data.data, buf, strlen(buf)))
- err("DB->get: wrong data returned");
-
- if (type == DB_RECNO) { /* Test DB->put no-overwrite. */
- recno = 42;
- key.data = &recno;
- key.size = sizeof(recno);
- } else {
- key.data = buf;
- key.size = strlen(buf);
- }
- sprintf(buf, "abc_%d_efg", 42);
- if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0)
- err("DB->put: no-overwrite succeeded");
-
- if (type == DB_RECNO) { /* Test DB->del. */
- recno = 35;
- key.data = &recno;
- key.size = sizeof(recno);
- } else {
- sprintf(buf, "abc_%d_efg", 35);
- key.data = buf;
- key.size = strlen(buf);
- }
- if (dbp->del(dbp, &key, 0))
- err("DB->del");
-
- /* Test DB->seq. */
- if ((outfp = fopen("output", "w")) == NULL)
- err("fopen: output");
- while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) {
- if (type == DB_RECNO)
- fprintf(outfp, "%d\n", *(int *)key.data);
- else
- fprintf(outfp,
- "%.*s\n", (int)key.size, (char *)key.data);
- fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data);
- }
- if (ret != 1)
- err("DB->seq");
- fclose(outfp);
- switch (type) {
- case DB_BTREE:
- ret = system("cmp output O.BH");
- break;
- case DB_HASH:
- ret = system("sort output | cmp - O.BH");
- break;
- case DB_RECNO:
- ret = system("cmp output O.R");
- break;
- }
- if (ret != 0)
- err("output comparison failed");
-
- if (dbp->sync(dbp, 0)) /* Test DB->sync. */
- err("DB->sync");
-
- if (dbp->close(dbp)) /* Test DB->close. */
- err("DB->close");
-}
-
-void
-err(s)
- char *s;
-{
- fprintf(stderr, "\t%s: %s\n", s, strerror(errno));
- exit (1);
-}
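
The scr017 program above drives the DB 1.85 compatibility API (dbopen(), DB->put, DB->seq). For readers comparing it with the interface this import targets, here is a minimal sketch of the equivalent open under the documented DB 4.x C API; the file name "a.db" comes from t.c, everything else is illustrative rather than part of the test suite.

#include <stdio.h>
#include <db.h>

int
main()
{
	DB *dbp;
	int ret;

	/* DB 4.x replaces dbopen() with db_create() plus DB->open(). */
	if ((ret = db_create(&dbp, NULL, 0)) != 0) {
		fprintf(stderr, "db_create: %s\n", db_strerror(ret));
		return (1);
	}
	/* NULL database name: a whole-file database, as dbopen("a.db") above. */
	if ((ret = dbp->open(dbp,
	    NULL, "a.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
		dbp->err(dbp, ret, "DB->open: a.db");
		(void)dbp->close(dbp, 0);
		return (1);
	}
	(void)dbp->close(dbp, 0);
	return (0);
}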
diff --git a/storage/bdb/test/scr018/chk.comma b/storage/bdb/test/scr018/chk.comma
deleted file mode 100644
index 42df48d1881..00000000000
--- a/storage/bdb/test/scr018/chk.comma
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.comma,v 1.1 2001/11/03 18:43:49 bostic Exp $
-#
-# Look for trailing commas in declarations. Some compilers can't handle:
-# enum {
-# foo,
-# bar,
-# };
-
-[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
- echo 'FAIL: unable to find or build libdb.a'
- exit 1
-}
-
-if cc -g -Wall -I.. t.c ../libdb.a -o t; then
- :
-else
- echo "FAIL: unable to compile test program t.c"
- exit 1
-fi
-
-if ./t ../../*/*.[ch] ../../*/*.in; then
- :
-else
- echo "FAIL: test program failed"
- exit 1
-fi
-
-exit 0
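
A self-contained illustration of the portability pitfall chk.comma scans for: a trailing comma in an enumerator list is legal C99, but some of the older compilers this tree supported rejected it, hence the check.

/*
 * Rejected by some pre-C99 compilers (the form chk.comma flags):
 *
 *	enum color { RED, GREEN, BLUE, };
 *
 * The portable spelling the check enforces:
 */
enum color { RED, GREEN, BLUE };

int
main()
{
	return ((int)BLUE);
}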
diff --git a/storage/bdb/test/scr018/t.c b/storage/bdb/test/scr018/t.c
deleted file mode 100644
index 4056a605928..00000000000
--- a/storage/bdb/test/scr018/t.c
+++ /dev/null
@@ -1,46 +0,0 @@
-#include <sys/types.h>
-
-#include <ctype.h>
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-int
-chk(f)
- char *f;
-{
- int ch, l, r;
-
- if (freopen(f, "r", stdin) == NULL) {
- fprintf(stderr, "%s: %s\n", f, strerror(errno));
- exit (1);
- }
- for (l = 1, r = 0; (ch = getchar()) != EOF;) {
- if (ch != ',')
- goto next;
- do { ch = getchar(); } while (isblank(ch));
- if (ch != '\n')
- goto next;
- ++l;
- do { ch = getchar(); } while (isblank(ch));
- if (ch != '}')
- goto next;
- r = 1;
- printf("%s: line %d\n", f, l);
-
-next: if (ch == '\n')
- ++l;
- }
- return (r);
-}
-
-int
-main(int argc, char *argv[])
-{
- int r;
-
- for (r = 0; *++argv != NULL;)
- if (chk(*argv))
- r = 1;
- return (r);
-}
diff --git a/storage/bdb/test/scr019/chk.include b/storage/bdb/test/scr019/chk.include
deleted file mode 100644
index edd2bf6b451..00000000000
--- a/storage/bdb/test/scr019/chk.include
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.include,v 1.4 2004/10/07 20:34:39 bostic Exp $
-#
-# Check for inclusion of files already included in db_int.h.
-
-d=../..
-
-# Test must be run from the top-level directory, not from a test directory.
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-t1=__1
-t2=__2
-
-egrep -- '#include[ ]' $d/dbinc/db_int.in |
-sed -e '/[ ]db\.h'/d \
- -e 's/^#include.//' \
- -e 's/[<>"]//g' \
- -e 's/[ ].*//' > $t1
-
-for i in `cat $t1`; do
- (cd $d && egrep "^#include[ ].*[<\"]$i[>\"]" */*.[ch])
-done |
-sed -e '/^build/d' \
- -e '/^db_dump185/d' \
- -e '/^examples_c/d' \
- -e '/^libdb_java.*errno.h/d' \
- -e '/^libdb_java.*java_util.h/d' \
- -e '/^mod_db4/d' \
- -e '/^mutex\/tm.c/d' \
- -e '/^perl/d' \
- -e '/^php_db4/d' \
- -e '/^test_/d' \
- > $t2
-
-[ -s $t2 ] && {
- echo 'FAIL: found extraneous includes in the source'
- cat $t2
- exit 1
-}
-exit 0
diff --git a/storage/bdb/test/scr020/chk.inc b/storage/bdb/test/scr020/chk.inc
deleted file mode 100644
index 24c1a47c10d..00000000000
--- a/storage/bdb/test/scr020/chk.inc
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.inc,v 1.2 2003/08/01 16:49:29 bostic Exp $
-#
-# Check for inclusion of db_config.h after "const" or other includes.
-
-d=../..
-
-# Test must be run from the top-level directory, not from a test directory.
-[ -f $d/LICENSE ] || {
- echo 'FAIL: cannot find source distribution directory.'
- exit 1
-}
-
-t1=__1
-t2=__2
-
-(cd $d && find . -name '*.[chys]' -o -name '*.cpp' |
- xargs egrep -l '#include.*db_config.h') | tee /tmp/o |
- sed -e '/dbdemo.c$/d' \
- -e '/db_java_wrap.c$/d' > $t1
-
-(for i in `cat $t1`; do
- egrep -w 'db_config.h|const' /dev/null $d/$i | head -1
-done) > $t2
-
-if egrep const $t2 > /dev/null; then
- echo 'FAIL: found const before include of db_config.h'
- egrep const $t2
- exit 1
-fi
-
-:> $t2
-for i in `cat $t1`; do
- egrep -w '#include' /dev/null $d/$i | head -1 >> $t2
-done
-
-if egrep -v db_config.h $t2 > /dev/null; then
- echo 'FAIL: found includes before include of db_config.h'
- egrep -v db_config.h $t2
- exit 1
-fi
-
-exit 0
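
A sketch of the source layout chk.inc enforces. It assumes a file inside the DB tree, where the configure-generated db_config.h must precede every other include and any use of "const"; the program body is illustrative only.

/* db_config.h first: before system headers and before any "const". */
#include "db_config.h"

#include <sys/types.h>
#include <stdio.h>

static const char *progname = "example";	/* const only after db_config.h */

int
main()
{
	printf("%s\n", progname);
	return (0);
}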
diff --git a/storage/bdb/test/scr021/chk.flags b/storage/bdb/test/scr021/chk.flags
deleted file mode 100644
index 2f32b82e636..00000000000
--- a/storage/bdb/test/scr021/chk.flags
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.flags,v 1.12 2004/10/19 02:28:19 bostic Exp $
-#
-# Check flag name-spaces.
-
-d=../..
-t1=__1
-t2=__2
-
-if cc -g -Wall -I.. t.c -o t; then
- :
-else
- echo "FAIL: unable to compile test program t.c"
- exit 1
-fi
-
-if ./t $d/*/*.[ch] $d/*/*.in > $t1; then
- :
-else
- echo "FAIL: test program failed"
- exit 1
-fi
-
-echo Checking DB_ENV flags...
-cat $t1 |
-grep '(dbenv,' |
-sed -e '/DB_ENV_/d' \
- -e '/env_method.c.*, mapped_flags*)/d' \
- -e '/env_region.c.*, db_env_reset)/d' \
- > $t2
-[ -s $t2 ] && {
- cat $t2
- exit 1
-}
-
-grep 'DB_ENV_' $t1 |
-sed -e '/((*dbenv)*,/d' \
- -e '/((*dbp)*->dbenv,/d' \
- -e '/((*infop)*->dbenv,/d' \
- -e '/((*reginfop)*->dbenv,/d' \
- -e '/((*sdbp)*->dbenv,/d' \
- > $t2
-[ -s $t2 ] && {
- cat $t2
- exit 1
-}
-
-echo Checking DB flags...
-cp $t1 /tmp/_f
-cat $t1 |
-grep '(dbp,' |
-sed -e '/DB_AM_/d' \
- -e '/dbp, mapped_flag)/d' \
- > $t2
-[ -s $t2 ] && {
- cat $t2
- exit 1
-}
-
-grep 'DB_AM_' $t1 |
-sed -e '/((*[ ]*db_rep->rep_db)*,/d' \
- -e '/((*[ ]*dbc)*->dbp,/d' \
- -e '/((*[ ]*dbc_arg->dbp)*,/d' \
- -e '/((*[ ]*dbp)*,/d' \
- -e '/((*[ ]*dbp)*->s_primary,/d' \
- -e '/((D),/d' \
- -e '/(mdbp,/d' \
- -e '/(pdbp,/d' \
- -e '/(pginfo, /d' \
- -e '/(sdbp,/d' \
- -e '/(subdbp,/d' \
- -e '/fop_util.c:.*(t2dbp,/d' \
- -e '/rep_backup.c.*(rfp,/d' \
- > $t2
-[ -s $t2 ] && {
- cat $t2
- exit 1
-}
-
-echo Checking DBC flags...
-cat $t1 |
-grep '(dbc,' |
-sed -e '/DBC_/d' \
- > $t2
-[ -s $t2 ] && {
- cat $t2
- exit 1
-}
-
-grep 'DBC_' $t1 |
-sed -e '/((*dbc)*,/d' \
- -e '/(dbc_arg,/d' \
- -e '/(dbc_c,/d' \
- -e '/(dbc_n,/d' \
- -e '/(dbc_orig,/d' \
- -e '/(opd,/d' \
- -e '/(pdbc,/d' \
- -e '/(sdbc,/d' \
- > $t2
-[ -s $t2 ] && {
- cat $t2
- exit 1
-}
-
-echo Checking for bad use of macros...
-egrep 'case .*F_SET\(|case .*F_CLR\(' $d/*/*.c > $t1
-egrep 'for .*F_SET\(|for .*F_CLR\(' $d/*/*.c >> $t1
-egrep 'if .*F_SET\(|if .*F_CLR\(' $d/*/*.c >> $t1
-egrep 'switch .*F_SET\(|switch .*F_CLR\(' $d/*/*.c >> $t1
-egrep 'while .*F_SET\(|while .*F_CLR\(' $d/*/*.c >> $t1
-[ -s $t1 ] && {
-	echo 'conditional statement (case/for/if/switch/while) followed by non-test macro'
- cat $t1
- exit 1
-}
-
-exit 0
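
A self-contained approximation of the flag-macro idiom chk.flags audits. The F_SET/F_CLR/F_ISSET bodies below are the conventional bit-flag forms (the real definitions live in db_int.h); they show why the script's final pass treats F_SET or F_CLR inside a test as a bug: those macros assign, they do not test.

#include <stdio.h>

#define	F_ISSET(p, f)	((p)->flags & (f))
#define	F_SET(p, f)	((p)->flags |= (f))
#define	F_CLR(p, f)	((p)->flags &= ~(f))

#define	OBJ_DIRTY	0x0001

struct obj {
	unsigned int flags;
};

int
main()
{
	struct obj o = { 0 };

	F_SET(&o, OBJ_DIRTY);			/* set: statement context */
	if (F_ISSET(&o, OBJ_DIRTY))		/* test: F_ISSET, never F_SET */
		printf("dirty\n");
	/*
	 * if (F_SET(&o, OBJ_DIRTY)) ...	<-- the misuse chk.flags hunts:
	 * the |= makes the condition unconditionally true.
	 */
	F_CLR(&o, OBJ_DIRTY);
	return (0);
}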
diff --git a/storage/bdb/test/scr022/chk.rr b/storage/bdb/test/scr022/chk.rr
deleted file mode 100644
index 53d8bb15833..00000000000
--- a/storage/bdb/test/scr022/chk.rr
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh -
-#
-# $Id: chk.rr,v 1.5 2004/10/07 20:40:43 bostic Exp $
-
-d=../..
-
-t1=__1
-
-# Check for DB_RUNRECOVERY being specified instead of a call to db_panic.
-egrep DB_RUNRECOVERY $d/*/*.c |
- sed -e '/__db_panic(.*, DB_RUNRECOVERY)/d' \
- -e '/case DB_RUNRECOVERY:/d' \
- -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \
- -e '/db_err.c:/d' \
- -e '/os_errno.c:.*evalue == DB_RUNRECOVERY/d' \
- -e '/\/php_db4\//d' \
- -e '/rep_backup.c:.*Panic the env and return DB_RUNRECOVERY/d' \
- -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \
- -e '/txn.c:.*returned DB_RUNRECOVERY and we need to/d' \
- > $t1
-[ -s $t1 ] && {
- echo "DB_RUNRECOVERY used; should be a call to db_panic."
- cat $t1
- exit 1
-}
-
-exit 0
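
A sketch of the convention chk.rr enforces, using stand-in types and a mock panic helper, since the real __db_panic() is internal to the library. Only the shape of the rule, panic the environment and then propagate DB_RUNRECOVERY, is taken from the script's own patterns; the names and error value below are placeholders.

#include <stdio.h>

#define	MOCK_RUNRECOVERY	(-1)	/* stands in for db.h's DB_RUNRECOVERY */

struct mock_env {
	int panic;			/* stands in for the real panic flag */
};

/* Shape of the internal __db_panic(): mark the env, pass the error on. */
static int
mock_panic(struct mock_env *dbenv, int errval)
{
	dbenv->panic = 1;
	fprintf(stderr, "PANIC: fatal error %d\n", errval);
	return (errval);
}

static int
fatal_path(struct mock_env *dbenv)
{
	/*
	 * return (MOCK_RUNRECOVERY);	<-- what chk.rr rejects: the env
	 * would still look healthy to other threads of control.
	 */
	return (mock_panic(dbenv, MOCK_RUNRECOVERY));
}

int
main()
{
	struct mock_env env = { 0 };

	(void)fatal_path(&env);
	return (env.panic ? 0 : 1);
}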
diff --git a/storage/bdb/test/sdb001.tcl b/storage/bdb/test/sdb001.tcl
deleted file mode 100644
index 4ebb24dea29..00000000000
--- a/storage/bdb/test/sdb001.tcl
+++ /dev/null
@@ -1,147 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb001.tcl,v 11.23 2004/04/05 17:49:25 carol Exp $
-#
-# TEST sdb001 Tests mixing db and subdb operations
-# TEST Tests mixing db and subdb operations
-# TEST Create a db, add data, try to create a subdb.
-# TEST Test naming db and subdb with a leading - for correct parsing
-# TEST Existence check -- test use of -excl with subdbs
-# TEST
-# TEST Test non-subdb and subdb operations
-# TEST Test naming (filenames begin with -)
-# TEST Test existence (cannot create subdb of same name with -excl)
-proc sdb001 { method args } {
- source ./include.tcl
- global errorInfo
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_queue $method] == 1 } {
- puts "Subdb001: skipping for method $method"
- return
- }
- puts "Subdb001: $method ($args) subdb and non-subdb tests"
-
- set testfile $testdir/subdb001.db
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- set env NULL
- incr eindex
- set env [lindex $args $eindex]
- puts "Subdb001 skipping for env $env"
- return
- }
- # Create the database and open the dictionary
- set subdb subdb0
- cleanup $testdir NULL
- puts "\tSubdb001.a: Non-subdb database and subdb operations"
- #
- # Create a db with no subdbs. Add some data. Close. Try to
- # open/add with a subdb. Should fail.
- #
- puts "\tSubdb001.a.0: Create db, add data, close, try subdb"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
- while { [gets $did str] != -1 && $count < 5 } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) $str
- } else {
- set key $str
- }
- set ret [eval \
- {$db put} $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
- incr count
- }
- close $did
- error_check_good db_close [$db close] 0
- set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
- {$omethod $testfile $subdb}} db]
- error_check_bad dbopen $ret 0
- #
- # Create a db with no subdbs. Add no data. Close. Try to
- # open/add with a subdb. Should fail.
- #
- set testfile $testdir/subdb001a.db
- puts "\tSubdb001.a.1: Create db, close, try subdb"
- #
- # !!!
- # Using -truncate is illegal when opening for subdbs, but we
- # can use it here because we are not using subdbs for this
- # create.
- #
- set db [eval {berkdb_open -create -truncate -mode 0644} $args \
- {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
- {$omethod $testfile $subdb}} db]
- error_check_bad dbopen $ret 0
-
- if { [is_queue $method] == 1 } {
- puts "Subdb001: skipping remainder of test for method $method"
- return
- }
-
- #
- # Test naming, db and subdb names beginning with -.
- #
- puts "\tSubdb001.b: Naming"
- set cwd [pwd]
- cd $testdir
- set testfile1 -subdb001.db
- set subdb -subdb
- puts "\tSubdb001.b.0: Create db and subdb with -name, no --"
- set ret [catch {eval {berkdb_open -create -mode 0644} $args \
- {$omethod $testfile1 $subdb}} db]
- error_check_bad dbopen $ret 0
- puts "\tSubdb001.b.1: Create db and subdb with -name, with --"
- set db [eval {berkdb_open -create -mode 0644} $args \
- {$omethod -- $testfile1 $subdb}]
- error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- cd $cwd
-
- #
- # Create 1 db with 1 subdb. Try to create another subdb of
- # the same name. Should fail.
- #
-
- puts "\tSubdb001.c: Existence check"
- set testfile $testdir/subdb001d.db
- set subdb subdb
- set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \
- {$omethod $testfile $subdb}} db]
- error_check_good dbopen [is_valid_db $db] TRUE
- set ret [catch {eval {berkdb_open_noerr -create -excl -mode 0644} \
- $args {$omethod $testfile $subdb}} db1]
- error_check_bad dbopen $ret 0
- error_check_good db_close [$db close] 0
-
- return
-}
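
The Tcl test above reaches subdatabase creation and the -excl existence check through the tclapi layer. In the documented C API the same semantics hang off the "database" argument of DB->open(); a minimal sketch with illustrative file and subdb names follows.

#include <stdio.h>
#include <db.h>

int
main()
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (1);

	/* A non-NULL "database" argument names a subdatabase in the file. */
	ret = dbp->open(dbp, NULL,
	    "subdb001.db", "subdb0", DB_BTREE, DB_CREATE | DB_EXCL, 0644);
	if (ret != 0)
		dbp->err(dbp, ret, "DB->open");
	(void)dbp->close(dbp, 0);

	/*
	 * A second DB_CREATE | DB_EXCL open of the same subdatabase must
	 * fail with EEXIST, which is what Subdb001.c checks above.
	 */
	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (1);
	ret = dbp->open(dbp, NULL,
	    "subdb001.db", "subdb0", DB_BTREE, DB_CREATE | DB_EXCL, 0644);
	printf("second -excl open: %s\n",
	    ret == 0 ? "unexpectedly succeeded" : db_strerror(ret));
	(void)dbp->close(dbp, 0);
	return (0);
}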
diff --git a/storage/bdb/test/sdb002.tcl b/storage/bdb/test/sdb002.tcl
deleted file mode 100644
index d951d716310..00000000000
--- a/storage/bdb/test/sdb002.tcl
+++ /dev/null
@@ -1,228 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb002.tcl,v 11.41 2004/04/05 17:49:26 carol Exp $
-#
-# TEST sdb002
-# TEST Tests basic subdb functionality
-# TEST Small keys, small data
-# TEST Put/get per key
-# TEST Dump file
-# TEST Close, reopen
-# TEST Dump file
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and data; retrieve each.
-# TEST After all are entered, retrieve all; compare output to original.
-# TEST Close file, reopen, do retrieve and re-verify.
-# TEST Then repeat using an environment.
-proc sdb002 { method {nentries 10000} args } {
- global passwd
- global has_crypto
-
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- set env NULL
- incr eindex
- set env [lindex $args $eindex]
- puts "Subdb002 skipping for env $env"
- return
- }
- set largs $args
- subdb002_main $method $nentries $largs
- append largs " -chksum "
- subdb002_main $method $nentries $largs
-
- # Skip remainder of test if release does not support encryption.
- if { $has_crypto == 0 } {
- return
- }
-
- append largs "-encryptaes $passwd "
- subdb002_main $method $nentries $largs
-}
-
-proc subdb002_main { method nentries largs } {
- source ./include.tcl
- global encrypt
-
- set largs [convert_args $method $largs]
- set omethod [convert_method $method]
-
- env_cleanup $testdir
-
- puts "Subdb002: $method ($largs) basic subdb tests"
- set testfile $testdir/subdb002.db
- subdb002_body $method $omethod $nentries $largs $testfile NULL
-
- # Run convert_encrypt so that old_encrypt will be reset to
- # the proper value and cleanup will work.
- convert_encrypt $largs
- set encargs ""
- set largs [split_encargs $largs encargs]
-
- cleanup $testdir NULL
- if { [is_queue $omethod] == 1 } {
- set sdb002_env berkdb_env_noerr
- } else {
- set sdb002_env berkdb_env
- }
- set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \
- -mode 0644} -home $testdir $encargs]
- error_check_good env_open [is_valid_env $env] TRUE
- puts "Subdb002: $method ($largs) basic subdb tests in an environment"
-
- # We're in an env--use default path to database rather than specifying
- # it explicitly.
- set testfile subdb002.db
- subdb002_body $method $omethod $nentries $largs $testfile $env
- error_check_good env_close [$env close] 0
-}
-
-proc subdb002_body { method omethod nentries largs testfile env } {
- global encrypt
- global passwd
- source ./include.tcl
-
- # Create the database and open the dictionary
- set subdb subdb0
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
-
- if { [is_queue $omethod] == 1 } {
- set sdb002_open berkdb_open_noerr
- } else {
- set sdb002_open berkdb_open
- }
-
- if { $env == "NULL" } {
- set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
- {$omethod $testfile $subdb}} db]
- } else {
- set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
- {-env $env $omethod $testfile $subdb}} db]
- }
-
- #
- # If -queue method, we need to make sure that trying to
- # create a subdb fails.
- if { [is_queue $method] == 1 } {
- error_check_bad dbopen $ret 0
- puts "Subdb002: skipping remainder of test for method $method"
- return
- }
-
- error_check_good dbopen $ret 0
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- set checkfunc subdb002_recno.check
- append gflags " -recno"
- } else {
- set checkfunc subdb002.check
- }
- puts "\tSubdb002.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- }
- set ret [eval \
- {$db put} $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
- incr count
- }
- close $did
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tSubdb002.b: dump file"
- set txn ""
- dump_file $db $txn $t1 $checkfunc
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {set i [incr i]} {
- puts $oid $i
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good Subdb002:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tSubdb002.c: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile $env $t1 $checkfunc \
- dump_file_direction "-first" "-next" $subdb
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdb002:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tSubdb002.d: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile $env $t1 $checkfunc \
- dump_file_direction "-last" "-prev" $subdb
-
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdb002:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tSubdb002.e: db_dump with subdatabase"
- set outfile $testdir/subdb002.dump
- set dumpargs " -f $outfile -s $subdb "
- if { $encrypt > 0 } {
- append dumpargs " -P $passwd "
- }
- if { $env != "NULL" } {
- append dumpargs " -h $testdir "
- }
- append dumpargs " $testfile"
- set stat [catch {eval {exec $util_path/db_dump} $dumpargs} ret]
- error_check_good dbdump.subdb $stat 0
-}
-
-# Check function for Subdb002; keys and data are identical
-proc subdb002.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
-
-proc subdb002_recno.check { key data } {
- global dict
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
-}
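
The put/get loop at Subdb002.a corresponds to the following DBT usage in the C API. A sketch only: the word and names are illustrative and error handling is elided for brevity.

#include <string.h>
#include <stdio.h>
#include <db.h>

int
main()
{
	DB *dbp;
	DBT key, data;
	char *word = "abacus";

	(void)db_create(&dbp, NULL, 0);
	(void)dbp->open(dbp, NULL,
	    "subdb002.db", "subdb0", DB_BTREE, DB_CREATE, 0644);

	/* Self as key and data, as in the Tcl loop. */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = data.data = word;
	key.size = data.size = (u_int32_t)strlen(word);

	if (dbp->put(dbp, NULL, &key, &data, 0) == 0 &&
	    dbp->get(dbp, NULL, &key, &data, 0) == 0)
		printf("%.*s\n", (int)data.size, (char *)data.data);

	(void)dbp->close(dbp, 0);
	return (0);
}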
diff --git a/storage/bdb/test/sdb003.tcl b/storage/bdb/test/sdb003.tcl
deleted file mode 100644
index c652a76d63f..00000000000
--- a/storage/bdb/test/sdb003.tcl
+++ /dev/null
@@ -1,182 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb003.tcl,v 11.29 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdb003
-# TEST Tests many subdbs
-# TEST Creates many subdbs and puts a small amount of
-# TEST data in each (many defaults to 1000)
-# TEST
-# TEST Use the first 1000 entries from the dictionary as subdbnames.
-# TEST Insert each with entry as name of subdatabase and a partial list
-# TEST as key/data. After all are entered, retrieve all; compare output
-# TEST to original. Close file, reopen, do retrieve and re-verify.
-proc sdb003 { method {nentries 1000} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_queue $method] == 1 } {
- puts "Subdb003: skipping for method $method"
- return
- }
-
- puts "Subdb003: $method ($args) many subdb tests"
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/subdb003.db
- set env NULL
- } else {
- set testfile subdb003.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- if { $nentries == 1000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- # Create the database and open the dictionary
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- set pflags ""
- set gflags ""
- set txn ""
- set fcount 0
-
- if { [is_record_based $method] == 1 } {
- set checkfunc subdb003_recno.check
- append gflags " -recno"
- } else {
- set checkfunc subdb003.check
- }
-
- # Here is the loop where we put and get each key/data pair
- set ndataent 10
- set fdid [open $dict]
- while { [gets $fdid str] != -1 && $fcount < $nentries } {
- if { $str == "" } {
- continue
- }
- set subdb $str
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile $subdb}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set count 0
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $ndataent } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret [list [list $key \
- [pad_data $method $str]]]
- incr count
- }
- close $did
- incr fcount
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $ndataent} {set i [incr i]} {
- puts $oid $i
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $ndataent $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good Subdb003:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile $env $t1 $checkfunc \
- dump_file_direction "-first" "-next" $subdb
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdb003:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- open_and_dump_subfile $testfile $env $t1 $checkfunc \
- dump_file_direction "-last" "-prev" $subdb
-
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdb003:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
- if { [expr $fcount % 100] == 0 } {
- puts -nonewline "$fcount "
- flush stdout
- }
- }
- close $fdid
- puts ""
-}
-
-# Check function for Subdb003; keys and data are identical
-proc subdb003.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
-
-proc subdb003_recno.check { key data } {
- global dict
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
-}
diff --git a/storage/bdb/test/sdb004.tcl b/storage/bdb/test/sdb004.tcl
deleted file mode 100644
index 273bda9bf4b..00000000000
--- a/storage/bdb/test/sdb004.tcl
+++ /dev/null
@@ -1,241 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb004.tcl,v 11.25 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdb004
-# TEST Tests large subdb names
-# TEST subdb name = filecontents,
-# TEST key = filename, data = filecontents
-# TEST Put/get per key
-# TEST Dump file
-# TEST Dump subdbs, verify data and subdb name match
-# TEST
-# TEST Create 1 db with many large subdbs. Use the contents as subdb names.
-# TEST Take the source files and dbtest executable and enter their names as
-# TEST the key with their contents as data. After all are entered, retrieve
-# TEST all; compare output to original. Close file, reopen, do retrieve and
-# TEST re-verify.
-proc sdb004 { method args} {
- global names
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
- puts "Subdb004: skipping for method $method"
- return
- }
-
- puts "Subdb004: $method ($args) \
- filecontents=subdbname filename=key filecontents=data pairs"
-
- set txnenv 0
- set envargs ""
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/subdb004.db
- set env NULL
- } else {
- set testfile subdb004.db
- incr eindex
- set env [lindex $args $eindex]
- set envargs " -env $env "
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- append envargs " -auto_commit "
- }
- set testdir [get_home $env]
- }
- # Create the database and open the dictionary
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
-
- cleanup $testdir $env
- set pflags ""
- set gflags ""
- set txn ""
- if { [is_record_based $method] == 1 } {
- set checkfunc subdb004_recno.check
- append gflags "-recno"
- } else {
- set checkfunc subdb004.check
- }
-
- # Here is the loop where we put and get each key/data pair
- # Note that the subdatabase name is passed in as a char *, not
- # in a DBT, so it may not contain nulls; use only source files.
- set file_list [glob $src_root/*/*.c]
- set fcount [llength $file_list]
- if { $txnenv == 1 && $fcount > 100 } {
- set file_list [lrange $file_list 0 99]
- set fcount 100
- }
-
- set count 0
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $fcount} {set i [incr i]} {
- puts $oid $i
- }
- close $oid
- } else {
- set oid [open $t2.tmp w]
- foreach f $file_list {
- puts $oid $f
- }
- close $oid
- filesort $t2.tmp $t2
- }
- puts "\tSubdb004.a: Set/Check each subdb"
- foreach f $file_list {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- set names([expr $count + 1]) $f
- } else {
- set key $f
- }
- # Should really catch errors
- set fid [open $f r]
- fconfigure $fid -translation binary
- set data [read $fid]
- set subdb $data
- close $fid
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile $subdb}]
- error_check_good dbopen [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval \
- {$db put} $txn $pflags {$key [chop_data $method $data]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Should really catch errors
- set fid [open $t4 w]
- fconfigure $fid -translation binary
- if [catch {eval {$db get} $gflags {$key}} data] {
- puts -nonewline $fid $data
- } else {
- # Data looks like {{key data}}
- set key [lindex [lindex $data 0] 0]
- set data [lindex [lindex $data 0] 1]
- puts -nonewline $fid $data
- }
- close $fid
-
- error_check_good Subdb004:diff($f,$t4) \
- [filecmp $f $t4] 0
-
- incr count
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- # puts "\tSubdb004.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_bin_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- }
-
- #
- # Now for each file, check that the subdb name is the same
- # as the data in that subdb and that the filename is the key.
- #
- puts "\tSubdb004.b: Compare subdb names with key/data"
- set db [eval {berkdb_open -rdonly} $envargs {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set c [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $c $db] TRUE
-
- for {set d [$c get -first] } { [llength $d] != 0 } \
- {set d [$c get -next] } {
- set subdbname [lindex [lindex $d 0] 0]
- set subdb [eval {berkdb_open} $args {$testfile $subdbname}]
-		error_check_good subdb_open [is_valid_db $subdb] TRUE
-
- # Output the subdb name
- set ofid [open $t3 w]
- fconfigure $ofid -translation binary
- if { [string compare "\0" \
- [string range $subdbname end end]] == 0 } {
- set slen [expr [string length $subdbname] - 2]
- set subdbname [string range $subdbname 1 $slen]
- }
- puts -nonewline $ofid $subdbname
- close $ofid
-
- # Output the data
- set subc [eval {$subdb cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $subc $subdb] TRUE
- set d [$subc get -first]
- error_check_good dbc_get [expr [llength $d] != 0] 1
- set key [lindex [lindex $d 0] 0]
- set data [lindex [lindex $d 0] 1]
-
- set ofid [open $t1 w]
- fconfigure $ofid -translation binary
- puts -nonewline $ofid $data
- close $ofid
-
- $checkfunc $key $t1
- $checkfunc $key $t3
-
- error_check_good Subdb004:diff($t3,$t1) \
- [filecmp $t3 $t1] 0
- error_check_good curs_close [$subc close] 0
- error_check_good db_close [$subdb close] 0
- }
- error_check_good curs_close [$c close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- if { [is_record_based $method] != 1 } {
- fileremove $t2.tmp
- }
-}
-
-# Check function for subdb004; key should be file name; data should be contents
-proc subdb004.check { binfile tmpfile } {
- source ./include.tcl
-
- error_check_good Subdb004:datamismatch($binfile,$tmpfile) \
- [filecmp $binfile $tmpfile] 0
-}
-proc subdb004_recno.check { binfile tmpfile } {
- global names
- source ./include.tcl
-
- set fname $names($binfile)
- error_check_good key"$binfile"_exists [info exists names($binfile)] 1
- error_check_good Subdb004:datamismatch($fname,$tmpfile) \
- [filecmp $fname $tmpfile] 0
-}
diff --git a/storage/bdb/test/sdb005.tcl b/storage/bdb/test/sdb005.tcl
deleted file mode 100644
index 7173bef009c..00000000000
--- a/storage/bdb/test/sdb005.tcl
+++ /dev/null
@@ -1,146 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb005.tcl,v 11.21 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdb005
-# TEST Tests cursor operations in subdbs
-# TEST Put/get per key
-# TEST Verify cursor operations work within subdb
-# TEST Verify cursor operations do not work across subdbs
-# TEST
-#
-# We should test this on all btrees, all hash, and a combination thereof
-proc sdb005 {method {nentries 100} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_queue $method] == 1 } {
- puts "Subdb005: skipping for method $method"
- return
- }
-
- puts "Subdb005: $method ( $args ) subdb cursor operations test"
- set txnenv 0
- set envargs ""
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/subdb005.db
- set env NULL
- } else {
- set testfile subdb005.db
- incr eindex
- set env [lindex $args $eindex]
- set envargs " -env $env "
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- append envargs " -auto_commit "
- if { $nentries == 100 } {
- set nentries 20
- }
- }
- set testdir [get_home $env]
- }
-
- cleanup $testdir $env
- set txn ""
- set psize 8192
- set duplist {-1 -1 -1 -1 -1}
- build_all_subdb \
- $testfile [list $method] $psize $duplist $nentries $args
- set numdb [llength $duplist]
- #
- # Get a cursor in each subdb and move past the end of each
- # subdb. Make sure we don't end up in another subdb.
- #
- puts "\tSubdb005.a: Cursor ops - first/prev and last/next"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- for {set i 0} {$i < $numdb} {incr i} {
- set db [eval {berkdb_open -unknown} $args {$testfile sub$i.db}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set db_handle($i) $db
- # Used in 005.c test
- lappend subdbnames sub$i.db
-
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- set d [$dbc get -first]
- error_check_good dbc_get [expr [llength $d] != 0] 1
-
- # Used in 005.b test
- set db_key($i) [lindex [lindex $d 0] 0]
-
- set d [$dbc get -prev]
- error_check_good dbc_get [expr [llength $d] == 0] 1
- set d [$dbc get -last]
- error_check_good dbc_get [expr [llength $d] != 0] 1
- set d [$dbc get -next]
- error_check_good dbc_get [expr [llength $d] == 0] 1
- error_check_good dbc_close [$dbc close] 0
- }
- #
- # Get a key from each subdb and try to get this key in a
- # different subdb. Make sure it fails
- #
- puts "\tSubdb005.b: Get keys in different subdb's"
- for {set i 0} {$i < $numdb} {incr i} {
- set n [expr $i + 1]
- if {$n == $numdb} {
- set n 0
- }
- set db $db_handle($i)
- if { [is_record_based $method] == 1 } {
- set d [eval {$db get -recno} $txn {$db_key($n)}]
- error_check_good \
- db_get [expr [llength $d] == 0] 1
- } else {
- set d [eval {$db get} $txn {$db_key($n)}]
- error_check_good db_get [expr [llength $d] == 0] 1
- }
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- #
- # Clean up
- #
- for {set i 0} {$i < $numdb} {incr i} {
- error_check_good db_close [$db_handle($i) close] 0
- }
-
- #
- # Check contents of DB for subdb names only. Makes sure that
- # every subdbname is there and that nothing else is there.
- #
- puts "\tSubdb005.c: Check DB is read-only"
- error_check_bad dbopen [catch \
- {berkdb_open_noerr -unknown $testfile} ret] 0
-
- puts "\tSubdb005.d: Check contents of DB for subdb names only"
- set db [eval {berkdb_open -unknown -rdonly} $envargs {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set subdblist [$db get -glob *]
- foreach kd $subdblist {
- # subname also used in subdb005.e,f below
- set subname [lindex $kd 0]
- set i [lsearch $subdbnames $subname]
- error_check_good subdb_search [expr $i != -1] 1
- set subdbnames [lreplace $subdbnames $i $i]
- }
- error_check_good subdb_done [llength $subdbnames] 0
-
- error_check_good db_close [$db close] 0
- return
-}
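
The cursor confinement Subdb005.a verifies looks like this in the C API; a sketch assuming an already-created subdatabase, using the 4.x DBC->c_get/c_close method names.

#include <string.h>
#include <stdio.h>
#include <db.h>

int
main()
{
	DB *dbp;
	DBC *dbc;
	DBT key, data;
	int ret;

	(void)db_create(&dbp, NULL, 0);
	(void)dbp->open(dbp, NULL,
	    "subdb005.db", "sub0.db", DB_UNKNOWN, 0, 0);

	(void)dbp->cursor(dbp, NULL, &dbc, 0);
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	/* Iterates this subdatabase only ... */
	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
		printf("%.*s\n", (int)key.size, (char *)key.data);
	/* ... and ends in DB_NOTFOUND, never a key from a sibling subdb. */
	if (ret != DB_NOTFOUND)
		dbp->err(dbp, ret, "DBC->c_get");

	(void)dbc->c_close(dbc);
	(void)dbp->close(dbp, 0);
	return (0);
}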
diff --git a/storage/bdb/test/sdb006.tcl b/storage/bdb/test/sdb006.tcl
deleted file mode 100644
index ffe3e470074..00000000000
--- a/storage/bdb/test/sdb006.tcl
+++ /dev/null
@@ -1,169 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb006.tcl,v 11.23 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdb006
-# TEST Tests intra-subdb join
-# TEST
-# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
-# TEST everything else does as well. We'll create test databases called
-# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
-# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
-# TEST ... where N is the number of the database. Primary.db is the primary
-# TEST database, and sub0.db is the database that has no matching duplicates.
-# TEST All of these are within a single database.
-#
-# We should test this on all btrees, all hash, and a combination thereof
-proc sdb006 {method {nentries 100} args } {
- source ./include.tcl
- global rand_init
-
- # NB: these flags are internal only, ok
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || [is_rbtree $method] } {
- puts "\tSubdb006 skipping for method $method."
- return
- }
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/subdb006.db
- set env NULL
- } else {
- set testfile subdb006.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- if { $nentries == 100 } {
- # !!!
- # nentries must be greater than the number
- # of do_join_subdb calls below.
- #
- set nentries 35
- }
- }
- set testdir [get_home $env]
- }
- berkdb srand $rand_init
-
- set oargs $args
- foreach opt {" -dup" " -dupsort"} {
- append args $opt
-
- puts "Subdb006: $method ( $args ) Intra-subdb join"
- set txn ""
- #
- # Get a cursor in each subdb and move past the end of each
- # subdb. Make sure we don't end up in another subdb.
- #
- puts "\tSubdb006.a: Intra-subdb join"
-
- if { $env != "NULL" } {
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set psize 8192
- set duplist {0 50 25 16 12}
- set numdb [llength $duplist]
- build_all_subdb $testfile [list $method] $psize \
- $duplist $nentries $args
-
- # Build the primary
- puts "Subdb006: Building the primary database $method"
- set oflags "-create -mode 0644 [conv $omethod \
- [berkdb random_int 1 2]]"
- set db [eval {berkdb_open} $oflags $oargs $testfile primary.db]
- error_check_good dbopen [is_valid_db $db] TRUE
- for { set i 0 } { $i < 1000 } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set key [format "%04d" $i]
- set ret [eval {$db put} $txn {$key stub}]
- error_check_good "primary put" $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- error_check_good "primary close" [$db close] 0
- set did [open $dict]
- gets $did str
- do_join_subdb $testfile primary.db "1 0" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "2 0" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "3 0" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4 0" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "2" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "3" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "1 2" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "1 2 3" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "2 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "3 2 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "1 3" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "3 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "1 4" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "2 3" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "3 2" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "2 4" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4 2" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "3 4" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4 3" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "2 3 4" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "3 4 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4 2 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "0 2 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "3 2 0" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
- gets $did str
- do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs
-
- close $did
- }
-}
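
The do_join_subdb calls above drive the equality join; in C this is the documented DB->join() over a NULL-terminated array of secondary cursors. A sketch with assumed subdb and key names, eliding error handling: position each secondary cursor on the join value, then read primary keys common to all cursors from the join cursor.

#include <string.h>
#include <stdio.h>
#include <db.h>

/* Open one subdatabase of subdb006.db; sketch only, no error handling. */
static DB *
open_subdb(const char *subname)
{
	DB *dbp;

	(void)db_create(&dbp, NULL, 0);
	(void)dbp->open(dbp, NULL, "subdb006.db", subname, DB_UNKNOWN, 0, 0);
	return (dbp);
}

int
main()
{
	DB *primary, *sub1, *sub2;
	DBC *curs[3], *join;
	DBT key, data;
	int ret;

	primary = open_subdb("primary.db");
	sub1 = open_subdb("sub1.db");
	sub2 = open_subdb("sub2.db");

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	/* Position a cursor on the join value in each secondary. */
	key.data = "someword";
	key.size = (u_int32_t)strlen("someword");
	(void)sub1->cursor(sub1, NULL, &curs[0], 0);
	(void)curs[0]->c_get(curs[0], &key, &data, DB_SET);
	(void)sub2->cursor(sub2, NULL, &curs[1], 0);
	(void)curs[1]->c_get(curs[1], &key, &data, DB_SET);
	curs[2] = NULL;		/* DB->join takes a NULL-terminated list */

	/* The join cursor returns items present in every secondary cursor. */
	(void)primary->join(primary, curs, &join, 0);
	while ((ret = join->c_get(join, &key, &data, 0)) == 0)
		printf("match: %.*s\n", (int)key.size, (char *)key.data);

	(void)join->c_close(join);
	(void)curs[0]->c_close(curs[0]);
	(void)curs[1]->c_close(curs[1]);
	(void)primary->close(primary, 0);
	(void)sub1->close(sub1, 0);
	(void)sub2->close(sub2, 0);
	return (0);
}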
diff --git a/storage/bdb/test/sdb007.tcl b/storage/bdb/test/sdb007.tcl
deleted file mode 100644
index 86f46599296..00000000000
--- a/storage/bdb/test/sdb007.tcl
+++ /dev/null
@@ -1,141 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb007.tcl,v 11.25 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST sdb007
-# TEST Tests page size difference errors between subdbs.
-# TEST Test 3 different scenarios for page sizes.
-# TEST 1. Create/open with a default page size, 2nd subdb create with
-# TEST specified different one, should error.
-# TEST 2. Create/open with specific page size, 2nd subdb create with
-# TEST different one, should error.
-# TEST 3. Create/open with specified page size, 2nd subdb create with
-# TEST same specified size, should succeed.
-# TEST (4th combo of using all defaults is a basic test, done elsewhere)
-proc sdb007 { method args } {
- source ./include.tcl
- global is_envmethod
-
- set db2args [convert_args -btree $args]
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_queue $method] == 1 } {
- puts "Subdb007: skipping for method $method"
- return
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Subdb007: skipping for specific page sizes"
- return
- }
-
- puts "Subdb007: $method ($args) subdb tests with different page sizes"
-
- set txnenv 0
- set envargs ""
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/subdb007.db
- set env NULL
- } else {
- set testfile subdb007.db
- incr eindex
- set env [lindex $args $eindex]
- set envargs " -env $env "
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- append envargs " -auto_commit "
- append db2args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set sub1 "sub1"
- set sub2 "sub2"
- cleanup $testdir $env
- set txn ""
-
- puts "\tSubdb007.a.0: create subdb with default page size"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile $sub1}]
- error_check_good subdb [is_valid_db $db] TRUE
- #
- # Figure out what the default page size is so that we can
- # guarantee we create it with a different value.
- set statret [$db stat]
- set pgsz 0
- foreach pair $statret {
- set fld [lindex $pair 0]
- if { [string compare $fld {Page size}] == 0 } {
- set pgsz [lindex $pair 1]
- }
- }
- error_check_good dbclose [$db close] 0
-
- if { $pgsz == 512 } {
- set pgsz2 2048
- } else {
- set pgsz2 512
- }
-
- puts "\tSubdb007.a.1: create 2nd subdb with specified page size"
- set stat [catch {eval {berkdb_open_noerr -create -btree} \
- $db2args {-pagesize $pgsz2 $testfile $sub2}} ret]
- error_check_good subdb:pgsz $stat 1
- # We'll get a different error if running in an env,
- # because the env won't have been opened with noerr.
- # Skip the test for what the error is, just getting the
- # error is enough.
- if { $is_envmethod == 0 } {
- error_check_good subdb:fail [is_substr $ret \
- "Different pagesize specified"] 1
- }
-
- set ret [eval {berkdb dbremove} $envargs {$testfile}]
-
- puts "\tSubdb007.b.0: create subdb with specified page size"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
- error_check_good subdb [is_valid_db $db] TRUE
- set statret [$db stat]
- set newpgsz 0
- foreach pair $statret {
- set fld [lindex $pair 0]
- if { [string compare $fld {Page size}] == 0 } {
- set newpgsz [lindex $pair 1]
- }
- }
- error_check_good pgsize $pgsz2 $newpgsz
- error_check_good dbclose [$db close] 0
-
- puts "\tSubdb007.b.1: create 2nd subdb with different page size"
- set stat [catch {eval {berkdb_open_noerr -create -btree} \
- $db2args {-pagesize $pgsz $testfile $sub2}} ret]
- error_check_good subdb:pgsz $stat 1
- if { $is_envmethod == 0 } {
- error_check_good subdb:fail [is_substr $ret \
- "Different pagesize specified"] 1
- }
-
- set ret [eval {berkdb dbremove} $envargs {$testfile}]
-
- puts "\tSubdb007.c.0: create subdb with specified page size"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-
- puts "\tSubdb007.c.1: create 2nd subdb with same specified page size"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-pagesize $pgsz2 $omethod $testfile $sub2}]
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-
-}
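
The page-size constraint tested above follows from all subdatabases sharing one file, and therefore one page size. In C the size is set with the documented DB->set_pagesize() before DB->open(); a sketch with assumed names.

#include <stdio.h>
#include <db.h>

int
main()
{
	DB *dbp;
	int ret;

	(void)db_create(&dbp, NULL, 0);
	(void)dbp->set_pagesize(dbp, 512);	/* must precede DB->open() */
	(void)dbp->open(dbp, NULL,
	    "subdb007.db", "sub1", DB_BTREE, DB_CREATE, 0644);
	(void)dbp->close(dbp, 0);

	/* A second subdb created with a different page size must fail. */
	(void)db_create(&dbp, NULL, 0);
	(void)dbp->set_pagesize(dbp, 2048);
	ret = dbp->open(dbp, NULL,
	    "subdb007.db", "sub2", DB_BTREE, DB_CREATE, 0644);
	if (ret != 0)
		fprintf(stderr, "expected failure: %s\n", db_strerror(ret));
	(void)dbp->close(dbp, 0);
	return (0);
}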
diff --git a/storage/bdb/test/sdb008.tcl b/storage/bdb/test/sdb008.tcl
deleted file mode 100644
index 3beb8313b67..00000000000
--- a/storage/bdb/test/sdb008.tcl
+++ /dev/null
@@ -1,94 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb008.tcl,v 11.31 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdb008
-# TEST Tests explicit setting of lorders for subdatabases -- the
-# TEST lorder should be ignored.
-proc sdb008 { method args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_queue $method] == 1 } {
- puts "Subdb008: skipping for method $method"
- return
- }
- set eindex [lsearch -exact $args "-env"]
-
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile1 $testdir/subdb008a.db
- set testfile2 $testdir/subdb008b.db
- set env NULL
- } else {
- set testfile1 subdb008a.db
- set testfile2 subdb008b.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- puts "Subdb008: $method ($args) subdb tests with different lorders"
-
- puts "\tSubdb008.a.0: create subdb with system default lorder"
- set lorder "1234"
- if { [big_endian] } {
- set lorder "4321"
- }
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile1 "sub1"}]
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-
- # Explicitly try to create subdb's of each byte order. In both
- # cases the subdb should be forced to the byte order of the
- # parent database.
- puts "\tSubdb008.a.1: Try to create subdb with -1234 lorder"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-lorder 1234 $omethod $testfile1 "sub2"}]
- error_check_good lorder_1234 [eval $db get_lorder] $lorder
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-
- puts "\tSubdb008.a.2: Try to create subdb with -4321 lorder"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-lorder 4321 $omethod $testfile1 "sub3"}]
- error_check_good lorder_4321 [eval $db get_lorder] $lorder
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-
- puts "\tSubdb008.b.0: create subdb with non-default lorder"
- set reverse_lorder "4321"
- if { [big_endian] } {
- set reverse_lorder "1234"
- }
- set db [eval {berkdb_open -create -mode 0644} \
- {-lorder $reverse_lorder} $args {$omethod $testfile2 "sub1"}]
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-
- puts "\tSubdb008.b.1: Try to create subdb with -1234 lorder"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-lorder 1234 $omethod $testfile2 "sub2"}]
- error_check_good lorder_1234 [eval $db get_lorder] $reverse_lorder
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-
- puts "\tSubdb008.b.2: Try to create subdb with -4321 lorder"
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-lorder 4321 $omethod $testfile2 "sub3"}]
- error_check_good lorder_4321 [eval $db get_lorder] $reverse_lorder
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-}
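
The lorder behavior above, in C terms: DB->set_lorder() and DB->get_lorder() are documented 4.x methods, and per the test the byte order set on the first database in a file wins, with later subdb settings ignored. Names below are illustrative.

#include <stdio.h>
#include <db.h>

int
main()
{
	DB *dbp;
	int lorder;

	(void)db_create(&dbp, NULL, 0);
	/* Requested order; for a later subdb the file's order prevails. */
	(void)dbp->set_lorder(dbp, 4321);
	(void)dbp->open(dbp, NULL,
	    "subdb008a.db", "sub2", DB_BTREE, DB_CREATE, 0644);
	(void)dbp->get_lorder(dbp, &lorder);
	printf("effective lorder: %d\n", lorder);	/* parent file's order */
	(void)dbp->close(dbp, 0);
	return (0);
}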
diff --git a/storage/bdb/test/sdb009.tcl b/storage/bdb/test/sdb009.tcl
deleted file mode 100644
index 3f36a27d781..00000000000
--- a/storage/bdb/test/sdb009.tcl
+++ /dev/null
@@ -1,108 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb009.tcl,v 11.12 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdb009
-# TEST Test DB->rename() method for subdbs
-proc sdb009 { method args } {
- global errorCode
- source ./include.tcl
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- puts "Subdb009: $method ($args): Test of DB->rename()"
-
- if { [is_queue $method] == 1 } {
- puts "\tSubdb009: Skipping for method $method."
- return
- }
-
- set txnenv 0
- set envargs ""
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/subdb009.db
- set env NULL
- } else {
- set testfile subdb009.db
- incr eindex
- set env [lindex $args $eindex]
- set envargs " -env $env "
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- append envargs " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set oldsdb OLDDB
- set newsdb NEWDB
-
- # Make sure we're starting from a clean slate.
- cleanup $testdir $env
- error_check_bad "$testfile exists" [file exists $testfile] 1
-
- puts "\tSubdb009.a: Create/rename file"
- puts "\t\tSubdb009.a.1: create"
- set db [eval {berkdb_open -create -mode 0644}\
- $omethod $args {$testfile $oldsdb}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # The nature of the key and data are unimportant; use numeric key
- # so record-based methods don't need special treatment.
- set txn ""
- set key 1
- set data [pad_data $method data]
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- error_check_good dbput [eval {$db put} $txn {$key $data}] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good dbclose [$db close] 0
-
- puts "\t\tSubdb009.a.2: rename"
- error_check_good rename_file [eval {berkdb dbrename} $envargs \
- {$testfile $oldsdb $newsdb}] 0
-
- puts "\t\tSubdb009.a.3: check"
- # Open again with create to make sure we've really completely
- # disassociated the subdb from the old name.
- set odb [eval {berkdb_open -create -mode 0644}\
- $omethod $args $testfile $oldsdb]
- error_check_good odb_open [is_valid_db $odb] TRUE
- set odbt [$odb get $key]
- error_check_good odb_close [$odb close] 0
-
- set ndb [eval {berkdb_open -create -mode 0644}\
- $omethod $args $testfile $newsdb]
- error_check_good ndb_open [is_valid_db $ndb] TRUE
- set ndbt [$ndb get $key]
- error_check_good ndb_close [$ndb close] 0
-
- # The DBT from the "old" database should be empty, not the "new" one.
- error_check_good odbt_empty [llength $odbt] 0
- error_check_bad ndbt_empty [llength $ndbt] 0
- error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
-
- # Now there's both an old and a new. Rename the "new" to the "old"
- # and make sure that fails.
- puts "\tSubdb009.b: Make sure rename fails instead of overwriting"
- set ret [catch {eval {berkdb dbrename} $envargs $testfile \
- $oldsdb $newsdb} res]
- error_check_bad rename_overwrite $ret 0
- error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
-
- puts "\tSubdb009 succeeded."
-}
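
For reference, the call exercised above is the Tcl-level rename,
berkdb dbrename ?-env env? ?-auto_commit? filename oldsub newsub; as
step .b checks, renaming onto an existing subdb name fails with EEXIST
rather than overwriting. A minimal sketch (file and subdb names
illustrative):

	# Succeeds only if NEW does not already exist in one.db.
	set ret [catch {berkdb dbrename one.db OLD NEW} res]
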
diff --git a/storage/bdb/test/sdb010.tcl b/storage/bdb/test/sdb010.tcl
deleted file mode 100644
index 8f1d6f8a171..00000000000
--- a/storage/bdb/test/sdb010.tcl
+++ /dev/null
@@ -1,170 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb010.tcl,v 11.19 2004/10/18 17:34:17 carol Exp $
-#
-# TEST sdb010
-# TEST Test DB->remove() method and DB->truncate() for subdbs
-proc sdb010 { method args } {
- global errorCode
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
-	puts "Subdb010: Test of DB->remove() and DB->truncate()"
-
- if { [is_queue $method] == 1 } {
- puts "\tSubdb010: Skipping for method $method."
- return
- }
-
- set txnenv 0
- set envargs ""
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are not given an env, create one.
- if { $eindex == -1 } {
- set env [berkdb_env -create -home $testdir -mode 0644]
- error_check_good env_open [is_valid_env $env] TRUE
- } else {
- incr eindex
- set env [lindex $args $eindex]
- }
- set testfile subdb010.db
- set envargs " -env $env "
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- append envargs " -auto_commit "
- }
- set testdir [get_home $env]
- set tfpath $testdir/$testfile
-
- cleanup $testdir $env
-
- set txn ""
- set testdb DATABASE
- set testdb2 DATABASE2
-
- set db [eval {berkdb_open -create -mode 0644} $omethod \
- $args $envargs $testfile $testdb]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
-
- puts "\tSubdb010.a: Test of DB->remove()"
- error_check_good file_exists_before [file exists $tfpath] 1
- error_check_good db_remove [eval {berkdb dbremove} $envargs \
- $testfile $testdb] 0
-
- # File should still exist.
- error_check_good file_exists_after [file exists $tfpath] 1
-
- # But database should not.
- set ret [catch {eval berkdb_open $omethod \
- $args $envargs $testfile $testdb} res]
-	error_check_bad open_failed $ret 0
- error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1
-
- puts "\tSubdb010.b: Setup for DB->truncate()"
- # The nature of the key and data are unimportant; use numeric key
- # so record-based methods don't need special treatment.
- set key1 1
- set key2 2
- set data1 [pad_data $method data1]
- set data2 [pad_data $method data2]
-
- set db [eval {berkdb_open -create -mode 0644} $omethod \
- $args $envargs {$testfile $testdb}]
- error_check_good db_open [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- error_check_good dbput [eval {$db put} $txn {$key1 $data1}] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set db2 [eval {berkdb_open -create -mode 0644} $omethod \
- $args $envargs $testfile $testdb2]
- error_check_good db_open [is_valid_db $db2] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- error_check_good dbput [eval {$db2 put} $txn {$key2 $data2}] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- error_check_good db_close [$db close] 0
- error_check_good db_close [$db2 close] 0
-
- puts "\tSubdb010.c: truncate"
- #
- # Return value should be 1, the count of how many items were
- # destroyed when we truncated.
- set db [eval {berkdb_open -create -mode 0644} $omethod \
- $args $envargs $testfile $testdb]
- error_check_good db_open [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- error_check_good trunc_subdb [eval {$db truncate} $txn] 1
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tSubdb010.d: check"
- set db [eval {berkdb_open} $args $envargs {$testfile $testdb}]
- error_check_good db_open [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- set kd [$dbc get -first]
- error_check_good trunc_dbcget [llength $kd] 0
- error_check_good dbcclose [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set db2 [eval {berkdb_open} $args $envargs {$testfile $testdb2}]
- error_check_good db_open [is_valid_db $db2] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db2 cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db2] TRUE
- set kd [$dbc get -first]
- error_check_bad notrunc_dbcget1 [llength $kd] 0
- set db2kd [list [list $key2 $data2]]
- error_check_good key2 $kd $db2kd
- set kd [$dbc get -next]
-	error_check_good notrunc_dbcget2 [llength $kd] 0
- error_check_good dbcclose [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- error_check_good db_close [$db close] 0
- error_check_good db_close [$db2 close] 0
-
- # If we created our env, close it.
- if { $eindex == -1 } {
- error_check_good env_close [$env close] 0
- }
-}
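
Two conventions the .c and .d steps depend on: the Tcl-level truncate
returns the number of records it discarded, and truncating one subdb
leaves sibling subdbs in the same file untouched. A minimal sketch
(file and subdb names illustrative):

	set db [berkdb_open -create -btree file.db "sub"]
	error_check_good put [$db put 1 data] 0
	puts [$db truncate]	;# prints 1, the discarded-record count
	error_check_good dbclose [$db close] 0
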
diff --git a/storage/bdb/test/sdb011.tcl b/storage/bdb/test/sdb011.tcl
deleted file mode 100644
index d36b83e5df2..00000000000
--- a/storage/bdb/test/sdb011.tcl
+++ /dev/null
@@ -1,141 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb011.tcl,v 11.14 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdb011
-# TEST Test deleting Subdbs with overflow pages
-# TEST Create 1 db with many large subdbs.
-# TEST Test subdatabases with overflow pages.
-proc sdb011 { method {ndups 13} {nsubdbs 10} args} {
- global names
- source ./include.tcl
- global rand_init
- error_check_good set_random_seed [berkdb srand $rand_init] 0
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
- puts "Subdb011: skipping for method $method"
- return
- }
- set txnenv 0
- set envargs ""
- set max_files 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/subdb011.db
- set env NULL
- set tfpath $testfile
- } else {
- set testfile subdb011.db
- incr eindex
- set env [lindex $args $eindex]
- set envargs " -env $env "
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- append envargs " -auto_commit "
- set max_files 50
- if { $ndups == 13 } {
- set ndups 7
- }
- }
- set testdir [get_home $env]
- set tfpath $testdir/$testfile
- }
-
- # Create the database and open the dictionary
-
- cleanup $testdir $env
- set txn ""
-
- # Here is the loop where we put and get each key/data pair
- set file_list [get_file_list]
- set flen [llength $file_list]
- puts "Subdb011: $method ($args) $ndups overflow dups with \
- $flen filename=key filecontents=data pairs"
-
- puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups"
- set slist {}
- set i 0
- set count 0
- foreach f $file_list {
- set i [expr $i % $nsubdbs]
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- set names([expr $count + 1]) $f
- } else {
- set key $f
- }
- # Should really catch errors
- set fid [open $f r]
- fconfigure $fid -translation binary
- set filecont [read $fid]
- set subdb subdb$i
- lappend slist $subdb
- close $fid
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile $subdb}]
- error_check_good dbopen [is_valid_db $db] TRUE
- for {set dup 0} {$dup < $ndups} {incr dup} {
- set data $dup:$filecont
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key \
- [chop_data $method $data]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- error_check_good dbclose [$db close] 0
- incr i
- incr count
- }
-
- puts "\tSubdb011.b: Verify overflow pages"
- foreach subdb $slist {
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile $subdb}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set stat [$db stat]
-
- # What everyone else calls overflow pages, hash calls "big
- # pages", so we need to special-case hash here. (Hash
- # overflow pages are additional pages after the first in a
- # bucket.)
- if { [string compare [$db get_type] hash] == 0 } {
- error_check_bad overflow \
- [is_substr $stat "{{Number of big pages} 0}"] 1
- } else {
- error_check_bad overflow \
- [is_substr $stat "{{Overflow pages} 0}"] 1
- }
- error_check_good dbclose [$db close] 0
- }
-
- puts "\tSubdb011.c: Delete subdatabases"
- for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} {
- #
- # Randomly delete a subdatabase
- set sindex [berkdb random_int 0 [expr $i - 1]]
- set subdb [lindex $slist $sindex]
- #
- # Delete the one we did from the list
- set slist [lreplace $slist $sindex $sindex]
- error_check_good file_exists_before [file exists $tfpath] 1
- error_check_good db_remove [eval {berkdb dbremove} $envargs \
- {$testfile $subdb}] 0
- }
-}
-
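The stat probe in step .b is the only method-specific branch in this
test: hash reports overflow storage as "Number of big pages" where the
other access methods report "Overflow pages". A sketch of the probe,
assuming an open handle $db:

	# True when the database actually allocated overflow storage.
	set stat [$db stat]
	if { [string compare [$db get_type] hash] == 0 } {
		set zero "{{Number of big pages} 0}"
	} else {
		set zero "{{Overflow pages} 0}"
	}
	set has_overflow [expr ![is_substr $stat $zero]]
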
diff --git a/storage/bdb/test/sdb012.tcl b/storage/bdb/test/sdb012.tcl
deleted file mode 100644
index d316acb7111..00000000000
--- a/storage/bdb/test/sdb012.tcl
+++ /dev/null
@@ -1,428 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdb012.tcl,v 1.6 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdb012
-# TEST Test subdbs with locking and transactions
-# TEST	Tests that creating and removing subdbs while handles
-# TEST	are open works correctly, and in the face of txns.
-#
-proc sdb012 { method args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_queue $method] == 1 } {
- puts "Subdb012: skipping for method $method"
- return
- }
-
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Subdb012 skipping for env $env"
- return
- }
- set encargs ""
- set largs [split_encargs $args encargs]
-
- puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
-
- #
- # sdb012_body takes a txn list containing 4 elements.
- # {txn command for first subdb
- # txn command for second subdb
- # txn command for first subdb removal
- # txn command for second subdb removal}
- #
-# The allowed commands are 'none', 'one', 'auto', 'abort', 'commit'.
- # 'none' is a special case meaning run without a txn. In the
- # case where all 4 items are 'none', we run in a lock-only env.
- # 'one' is a special case meaning we create the subdbs together
- # in one single transaction. It is indicated as the value for t1,
- # and the value in t2 indicates if that single txn should be
- # aborted or committed. It is not used and has no meaning
- # in the removal case. 'auto' means use the -auto_commit flag
- # to the operation, and 'abort' and 'commit' do the obvious.
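-#
-# For example, the tlist {one commit auto abort} creates both subdbs
-# inside a single committed txn, then removes the first subdb with
-# -auto_commit and the second inside a txn that is aborted.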
- #
- # First test locking w/o txns. If any in tlist are 'none',
- # all must be none.
- #
- # Now run through the txn-based operations
- set count 0
- set sdb "Subdb012."
- set teststr "abcdefghijklmnopqrstuvwxyz"
- set testlet [split $teststr {}]
- foreach t1 { none one abort auto commit } {
- foreach t2 { none abort auto commit } {
- if { $t1 == "one" } {
- if { $t2 == "none" || $t2 == "auto"} {
- continue
- }
- }
- set tlet [lindex $testlet $count]
- foreach r1 { none abort auto commit } {
- foreach r2 { none abort auto commit } {
- set tlist [list $t1 $t2 $r1 $r2]
- sdb012_body $testdir $omethod $largs \
- $encargs $sdb$tlet $tlist
- }
- }
- incr count
- }
- }
-
-}
-
-proc s012 { method args } {
- source ./include.tcl
-
- set omethod [convert_method $method]
-
- set encargs ""
- set largs ""
-
- puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
-
- set sdb "Subdb012."
- set tlet X
- set tlist $args
- error_check_good tlist [llength $tlist] 4
- sdb012_body $testdir $omethod $largs $encargs $sdb$tlet $tlist
-}
-
-#
-# This proc checks the tlist values and returns the flags
-# that should be used when opening the env. If we are running
-# with no txns, then just -lock, otherwise -txn.
-#
-proc sdb012_subsys { tlist } {
- set t1 [lindex $tlist 0]
- #
- # If we have no txns, all elements of the list should be none.
- # In that case we only run with locking turned on.
- # Otherwise, we use the full txn subsystems.
- #
- set allnone {none none none none}
- if { $allnone == $tlist } {
- set subsys "-lock"
- } else {
- set subsys "-txn"
- }
- return $subsys
-}
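-#
-# For example, sdb012_subsys {none none none none} returns "-lock",
-# while sdb012_subsys {one commit auto abort} returns "-txn".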
-
-#
-# This proc parses the tlist and returns a list of 4 items that
-# should be used in operations. I.e. it will begin the txns as
-# needed, or return a -auto_commit flag, etc.
-#
-proc sdb012_tflags { env tlist } {
- set ret ""
- set t1 ""
- foreach t $tlist {
- switch $t {
- one {
- set t1 [$env txn]
- error_check_good txnbegin [is_valid_txn $t1 $env] TRUE
- lappend ret "-txn $t1"
- lappend ret "-txn $t1"
- }
- auto {
- lappend ret "-auto_commit"
- }
- abort -
- commit {
- #
- # If the previous command was a "one", skip over
- # this commit/abort. Otherwise start a new txn
- # for the removal case.
- #
- if { $t1 == "" } {
- set txn [$env txn]
- error_check_good txnbegin [is_valid_txn $txn \
- $env] TRUE
- lappend ret "-txn $txn"
- } else {
- set t1 ""
- }
- }
- none {
- lappend ret ""
- }
- default {
- error "Txn command $t not implemented"
- }
- }
- }
- return $ret
-}
-
-#
-# This proc parses the tlist and returns a list of 4 items that
-# should be used in the txn conclusion operations. I.e. it will
-# give "" if using auto_commit (i.e. no final txn op), or a single
-# abort/commit if both subdb's are in one txn.
-#
-proc sdb012_top { tflags tlist } {
- set ret ""
- set t1 ""
- #
- # We know both lists have 4 items. Iterate over them
- # using multiple value lists so we know which txn goes
- # with each op.
- #
- # The tflags list is needed to extract the txn command
- # out for the operation. The tlist list is needed to
- # determine what operation we are doing.
- #
- foreach t $tlist tf $tflags {
- switch $t {
- one {
- set t1 [lindex $tf 1]
- }
- auto {
- lappend ret "sdb012_nop"
- }
- abort -
- commit {
- #
-				# If the previous command was a "one" (i.e. t1
-				# is set), append a no-op for that slot and
-				# then the real commit/abort of the single txn.
- #
- if { $t1 == "" } {
- set txn [lindex $tf 1]
- set top "$txn $t"
- lappend ret $top
- } else {
- set top "$t1 $t"
- lappend ret "sdb012_nop"
- lappend ret $top
- set t1 ""
- }
- }
- none {
- lappend ret "sdb012_nop"
- }
- }
- }
- return $ret
-}
-
-proc sdb012_nop { } {
- return 0
-}
-
-proc sdb012_isabort { tlist item } {
- set i [lindex $tlist $item]
- if { $i == "one" } {
- set i [lindex $tlist [expr $item + 1]]
- }
- if { $i == "abort" } {
- return 1
- } else {
- return 0
- }
-}
-
-proc sdb012_body { testdir omethod largs encargs msg tlist } {
-
- puts "\t$msg: $tlist"
- set testfile subdb012.db
- set subdb1 sub1
- set subdb2 sub2
-
- set subsys [sdb012_subsys $tlist]
- env_cleanup $testdir
- set env [eval {berkdb_env -create -home} $testdir $subsys $encargs]
- error_check_good dbenv [is_valid_env $env] TRUE
- error_check_good test_lock [$env test abort subdb_lock] 0
-
- #
- # Convert from our tlist txn commands into real flags we
- # will pass to commands. Use the multiple values feature
- # of foreach to do this efficiently.
- #
- set tflags [sdb012_tflags $env $tlist]
- foreach {txn1 txn2 rem1 rem2} $tflags {break}
- foreach {top1 top2 rop1 rop2} [sdb012_top $tflags $tlist] {break}
-
-# puts "txn1 $txn1, txn2 $txn2, rem1 $rem1, rem2 $rem2"
-# puts "top1 $top1, top2 $top2, rop1 $rop1, rop2 $rop2"
- puts "\t$msg.0: Create sub databases in env with $subsys"
- set s1 [eval {berkdb_open -env $env -create -mode 0644} \
- $largs $txn1 {$omethod $testfile $subdb1}]
- error_check_good dbopen [is_valid_db $s1] TRUE
-
- set ret [eval $top1]
- error_check_good t1_end $ret 0
-
- set s2 [eval {berkdb_open -env $env -create -mode 0644} \
- $largs $txn2 {$omethod $testfile $subdb2}]
- error_check_good dbopen [is_valid_db $s2] TRUE
-
- puts "\t$msg.1: Subdbs are open; resolve txns if necessary"
- set ret [eval $top2]
- error_check_good t2_end $ret 0
-
- set t1_isabort [sdb012_isabort $tlist 0]
- set t2_isabort [sdb012_isabort $tlist 1]
- set r1_isabort [sdb012_isabort $tlist 2]
- set r2_isabort [sdb012_isabort $tlist 3]
-
-# puts "t1_isabort $t1_isabort, t2_isabort $t2_isabort, r1_isabort $r1_isabort, r2_isabort $r2_isabort"
-
- puts "\t$msg.2: Subdbs are open; verify removal failures"
- # Verify removes of subdbs with open subdb's fail
- #
- # We should fail no matter what. If we aborted, then the
- # subdb should not exist. If we didn't abort, we should fail
- # with DB_LOCK_NOTGRANTED.
- #
- # XXX - Do we need -auto_commit for all these failing ones?
- set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
- error_check_bad dbremove1_open $r 0
- if { $t1_isabort } {
- error_check_good dbremove1_open_ab [is_substr \
- $result "no such file"] 1
- } else {
- error_check_good dbremove1_open [is_substr \
- $result DB_LOCK_NOTGRANTED] 1
- }
-
- set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
- error_check_bad dbremove2_open $r 0
- if { $t2_isabort } {
- error_check_good dbremove2_open_ab [is_substr \
- $result "no such file"] 1
- } else {
- error_check_good dbremove2_open [is_substr \
- $result DB_LOCK_NOTGRANTED] 1
- }
-
- # Verify file remove fails
- set r [catch {berkdb dbremove -env $env $testfile} result]
- error_check_bad dbremovef_open $r 0
-
- #
-	# If both creating txns aborted, the file should not exist.
- #
- if { $t1_isabort && $t2_isabort } {
- error_check_good dbremovef_open_ab [is_substr \
- $result "no such file"] 1
- } else {
- error_check_good dbremovef_open [is_substr \
- $result DB_LOCK_NOTGRANTED] 1
- }
-
- puts "\t$msg.3: Close subdb2; verify removals"
- error_check_good close_s2 [$s2 close] 0
- set r [ catch {eval {berkdb dbremove -env} \
- $env $rem2 $testfile $subdb2} result ]
- if { $t2_isabort } {
- error_check_bad dbrem2_ab $r 0
- error_check_good dbrem2_ab [is_substr \
- $result "no such file"] 1
- } else {
- error_check_good dbrem2 $result 0
- }
- # Resolve subdb2 removal txn
- set r [eval $rop2]
- error_check_good rop2 $r 0
-
- set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
- error_check_bad dbremove1.2_open $r 0
- if { $t1_isabort } {
- error_check_good dbremove1.2_open_ab [is_substr \
- $result "no such file"] 1
- } else {
- error_check_good dbremove1.2_open [is_substr \
- $result DB_LOCK_NOTGRANTED] 1
- }
-
- # There are three cases here:
- # 1. if both t1 and t2 aborted, the file shouldn't exist
- # 2. if only t1 aborted, the file still exists and nothing is open
- # 3. if neither aborted a remove should fail because the first
- # subdb is still open
- # In case 2, don't try the remove, because it should succeed
- # and we won't be able to test anything else.
- if { !$t1_isabort || $t2_isabort } {
- set r [catch {berkdb dbremove -env $env $testfile} result]
- if { $t1_isabort && $t2_isabort } {
- error_check_bad dbremovef.2_open $r 0
- error_check_good dbremove.2_open_ab [is_substr \
- $result "no such file"] 1
- } else {
- error_check_bad dbremovef.2_open $r 0
- error_check_good dbremove.2_open [is_substr \
- $result DB_LOCK_NOTGRANTED] 1
- }
- }
-
- puts "\t$msg.4: Close subdb1; verify removals"
- error_check_good close_s1 [$s1 close] 0
- set r [ catch {eval {berkdb dbremove -env} \
- $env $rem1 $testfile $subdb1} result ]
- if { $t1_isabort } {
- error_check_bad dbremove1_ab $r 0
- error_check_good dbremove1_ab [is_substr \
- $result "no such file"] 1
- } else {
- error_check_good dbremove1 $result 0
- }
- # Resolve subdb1 removal txn
- set r [eval $rop1]
- error_check_good rop1 $r 0
-
-
- # Verify removal of subdb2. All DB handles are closed now.
- # So we have two scenarios:
- # 1. The removal of subdb2 above was successful and subdb2
- # doesn't exist and we should fail that way.
- # 2. The removal of subdb2 above was aborted, and this
- # removal should succeed.
- #
- set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
- if { $r2_isabort && !$t2_isabort } {
- error_check_good dbremove2.1_ab $result 0
- } else {
- error_check_bad dbremove2.1 $r 0
- error_check_good dbremove2.1 [is_substr \
- $result "no such file"] 1
- }
-
- # Verify removal of subdb1. All DB handles are closed now.
- # So we have two scenarios:
- # 1. The removal of subdb1 above was successful and subdb1
- # doesn't exist and we should fail that way.
- # 2. The removal of subdb1 above was aborted, and this
- # removal should succeed.
- #
- set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
- if { $r1_isabort && !$t1_isabort } {
- error_check_good dbremove1.1 $result 0
- } else {
- error_check_bad dbremove_open $r 0
- error_check_good dbremove.1 [is_substr \
- $result "no such file"] 1
- }
-
- puts "\t$msg.5: All closed; remove file"
- set r [catch {berkdb dbremove -env $env $testfile} result]
- if { $t1_isabort && $t2_isabort } {
- error_check_bad dbremove_final_ab $r 0
- error_check_good dbremove_file_abstr [is_substr \
- $result "no such file"] 1
- } else {
- error_check_good dbremove_final $r 0
- }
- error_check_good envclose [$env close] 0
-}
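
s012, defined earlier in this file, replays one combination by hand;
its four trailing arguments form the tlist. An illustrative call:

	# One committed txn creates both subdbs; remove the first with
	# -auto_commit and the second inside a txn that is then aborted.
	s012 btree one commit auto abort
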
diff --git a/storage/bdb/test/sdbscript.tcl b/storage/bdb/test/sdbscript.tcl
deleted file mode 100644
index 645351b077e..00000000000
--- a/storage/bdb/test/sdbscript.tcl
+++ /dev/null
@@ -1,47 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdbscript.tcl,v 11.11 2004/01/28 03:36:29 bostic Exp $
-#
-# Usage: subdbscript testfile subdbnumber factor
-# testfile: name of DB itself
-# subdbnumber: n, subdb indicator, of form sub$n.db
-# factor: Delete over factor'th + n'th from my subdb.
-#
-# I.e. if factor is 10, and n is 0, remove entries, 0, 10, 20, ...
-# if factor is 10 and n is 1, remove entries 1, 11, 21, ...
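-#
-# Example (as sdbtest002 invokes it, with factor equal to the number
-# of subdbs): delete entries 1, 7, 13, ... from sub1.db inside
-# subdbtest002.db, running where ./include.tcl is present:
-#	tclsh sdbscript.tcl subdbtest002.db 1 6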
-source ./include.tcl
-source $test_path/test.tcl
-
-set usage "subdbscript testfile subdbnumber factor"
-
-# Verify usage
-if { $argc != 3 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set testfile [lindex $argv 0]
-set n [ lindex $argv 1 ]
-set factor [ lindex $argv 2 ]
-
-set db [berkdb_open -unknown $testfile sub$n.db]
-error_check_good db_open [is_valid_db $db] TRUE
-
-set dbc [$db cursor]
-error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-set i 1
-for {set d [$dbc get -first]} {[llength $d] != 0} {set d [$dbc get -next]} {
- set x [expr $i - $n]
- if { $x >= 0 && [expr $x % $factor] == 0 } {
- puts "Deleting $d"
- error_check_good dbc_del [$dbc del] 0
- }
- incr i
-}
-error_check_good db_close [$db close] 0
-
-exit
diff --git a/storage/bdb/test/sdbtest001.tcl b/storage/bdb/test/sdbtest001.tcl
deleted file mode 100644
index 65b64dac2d3..00000000000
--- a/storage/bdb/test/sdbtest001.tcl
+++ /dev/null
@@ -1,150 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdbtest001.tcl,v 11.21 2004/01/28 03:36:29 bostic Exp $
-#
-# TEST sdbtest001
-# TEST Tests multiple access methods in one subdb
-# TEST Open several subdbs, each with a different access method
-# TEST Small keys, small data
-# TEST Put/get per key per subdb
-# TEST Dump file, verify per subdb
-# TEST Close, reopen per subdb
-# TEST Dump file, verify per subdb
-# TEST
-# TEST Make several subdb's of different access methods all in one DB.
-# TEST Rotate methods and repeat [#762].
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and data; retrieve each.
-# TEST After all are entered, retrieve all; compare output to original.
-# TEST Close file, reopen, do retrieve and re-verify.
-proc sdbtest001 { {nentries 10000} } {
- source ./include.tcl
-
- puts "Subdbtest001: many different subdb access methods in one"
-
- # Create the database and open the dictionary
- set testfile $testdir/subdbtest001.db
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
-
- set txn ""
- set count 0
-
- # Set up various methods to rotate through
- lappend method_list [list "-rrecno" "-rbtree" "-hash" "-recno" "-btree"]
- lappend method_list [list "-recno" "-hash" "-btree" "-rbtree" "-rrecno"]
- lappend method_list [list "-btree" "-recno" "-rbtree" "-rrecno" "-hash"]
- lappend method_list [list "-hash" "-recno" "-rbtree" "-rrecno" "-btree"]
- lappend method_list [list "-rbtree" "-hash" "-btree" "-rrecno" "-recno"]
- lappend method_list [list "-rrecno" "-recno"]
- lappend method_list [list "-recno" "-rrecno"]
- lappend method_list [list "-hash" "-dhash"]
- lappend method_list [list "-dhash" "-hash"]
- lappend method_list [list "-rbtree" "-btree" "-dbtree" "-ddbtree"]
- lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"]
- lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"]
- lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"]
- set plist [list 512 8192 1024 4096 2048 16384]
- set mlen [llength $method_list]
- set plen [llength $plist]
- while { $plen < $mlen } {
- set plist [concat $plist $plist]
- set plen [llength $plist]
- }
- set pgsz 0
- foreach methods $method_list {
- cleanup $testdir NULL
- puts "\tSubdbtest001.a: create subdbs of different access methods:"
- puts "\tSubdbtest001.a: $methods"
- set nsubdbs [llength $methods]
- set duplist ""
- for { set i 0 } { $i < $nsubdbs } { incr i } {
- lappend duplist -1
- }
- set psize [lindex $plist $pgsz]
- incr pgsz
- set newent [expr $nentries / $nsubdbs]
- build_all_subdb $testfile $methods $psize $duplist $newent
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
-
- set method [lindex $methods $subdb]
- set method [convert_method $method]
- if { [is_record_based $method] == 1 } {
- set checkfunc subdbtest001_recno.check
- } else {
- set checkfunc subdbtest001.check
- }
-
- puts "\tSubdbtest001.b: dump file sub$subdb.db"
- set db [berkdb_open -unknown $testfile sub$subdb.db]
- dump_file $db $txn $t1 $checkfunc
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the
- # dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $newent} {incr i} {
- puts $oid [expr $subdb * $newent + $i]
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- # filehead uses 1-based line numbers
- set beg [expr $subdb * $newent]
- incr beg
- set end [expr $beg + $newent - 1]
- filehead $end $dict $t3 $beg
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good Subdbtest001:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $t1 $checkfunc \
- dump_file_direction "-first" "-next" sub$subdb.db
- if { [string compare $method "-recno"] != 0 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdbtest001:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $t1 $checkfunc \
- dump_file_direction "-last" "-prev" sub$subdb.db
-
- if { [string compare $method "-recno"] != 0 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdbtest001:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
- }
- }
-}
-
-# Check function for Subdbtest001; keys and data are identical
-proc subdbtest001.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
-
-proc subdbtest001_recno.check { key data } {
-global dict
-global kvals
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
-}
diff --git a/storage/bdb/test/sdbtest002.tcl b/storage/bdb/test/sdbtest002.tcl
deleted file mode 100644
index 4e51809fbb5..00000000000
--- a/storage/bdb/test/sdbtest002.tcl
+++ /dev/null
@@ -1,173 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdbtest002.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST sdbtest002
-# TEST	Tests multiple access methods in one subdb, accessed by
-# TEST	multiple processes.
-# TEST Open several subdbs, each with a different access method
-# TEST Small keys, small data
-# TEST Put/get per key per subdb
-# TEST Fork off several child procs to each delete selected
-# TEST data from their subdb and then exit
-# TEST Dump file, verify contents of each subdb is correct
-# TEST Close, reopen per subdb
-# TEST Dump file, verify per subdb
-# TEST
-# TEST Make several subdb's of different access methods all in one DB.
-# TEST	Fork off some child procs to each manipulate one subdb and when
-# TEST they are finished, verify the contents of the databases.
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and data; retrieve each.
-# TEST After all are entered, retrieve all; compare output to original.
-# TEST Close file, reopen, do retrieve and re-verify.
-proc sdbtest002 { {nentries 10000} } {
- source ./include.tcl
-
- puts "Subdbtest002: many different subdb access methods in one"
-
- # Create the database and open the dictionary
- set testfile $testdir/subdbtest002.db
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
-
- set txn ""
- set count 0
-
- # Set up various methods to rotate through
- set methods \
- [list "-rbtree" "-recno" "-btree" "-btree" "-recno" "-rbtree"]
- cleanup $testdir NULL
- puts "\tSubdbtest002.a: create subdbs of different methods: $methods"
- set psize 4096
- set nsubdbs [llength $methods]
- set duplist ""
- for { set i 0 } { $i < $nsubdbs } { incr i } {
- lappend duplist -1
- }
- set newent [expr $nentries / $nsubdbs]
-
- #
- # XXX We need dict sorted to figure out what was deleted
- # since things are stored sorted in the btree.
- #
- filesort $dict $t4
- set dictorig $dict
- set dict $t4
-
- build_all_subdb $testfile $methods $psize $duplist $newent
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- set pidlist ""
- puts "\tSubdbtest002.b: create $nsubdbs procs to delete some keys"
- for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
- puts "$tclsh_path\
- $test_path/sdbscript.tcl $testfile \
- $subdb $nsubdbs >& $testdir/subdb002.log.$subdb"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- sdbscript.tcl \
- $testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &]
- lappend pidlist $p
- }
- watch_procs $pidlist 5
-
- for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
- set method [lindex $methods $subdb]
- set method [convert_method $method]
- if { [is_record_based $method] == 1 } {
- set checkfunc subdbtest002_recno.check
- } else {
- set checkfunc subdbtest002.check
- }
-
- puts "\tSubdbtest002.b: dump file sub$subdb.db"
- set db [berkdb_open -unknown $testfile sub$subdb.db]
- error_check_good db_open [is_valid_db $db] TRUE
- dump_file $db $txn $t1 $checkfunc
- error_check_good db_close [$db close] 0
- #
- # This is just so that t2 is there and empty
- # since we are only appending below.
- #
- exec > $t2
-
- # Now compare the keys to see if they match the dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $newent} {incr i} {
- set x [expr $i - $subdb]
- if { [expr $x % $nsubdbs] != 0 } {
- puts $oid [expr $subdb * $newent + $i]
- }
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set oid [open $t4 r]
- for {set i 1} {[gets $oid line] >= 0} {incr i} {
- set farr($i) $line
- }
- close $oid
-
- set oid [open $t2 w]
- for {set i 1} {$i <= $newent} {incr i} {
-				# farr is indexed by 1-based line numbers
- set x [expr $i - $subdb]
- if { [expr $x % $nsubdbs] != 0 } {
- set beg [expr $subdb * $newent]
- set beg [expr $beg + $i]
- puts $oid $farr($beg)
- }
- }
- close $oid
- filesort $t1 $t3
- }
-
- error_check_good Subdbtest002:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $t1 $checkfunc \
- dump_file_direction "-first" "-next" sub$subdb.db
- if { [string compare $method "-recno"] != 0 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdbtest002:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $t1 $checkfunc \
- dump_file_direction "-last" "-prev" sub$subdb.db
-
- if { [string compare $method "-recno"] != 0 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdbtest002:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
- }
- set dict $dictorig
- return
-}
-
-# Check function for Subdbtest002; keys and data are identical
-proc subdbtest002.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
-
-proc subdbtest002_recno.check { key data } {
-global dict
-global kvals
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
-}
diff --git a/storage/bdb/test/sdbutils.tcl b/storage/bdb/test/sdbutils.tcl
deleted file mode 100644
index 5c6deb3b47b..00000000000
--- a/storage/bdb/test/sdbutils.tcl
+++ /dev/null
@@ -1,197 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sdbutils.tcl,v 11.16 2004/01/28 03:36:30 bostic Exp $
-#
-proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} {
- set nsubdbs [llength $dups]
- set mlen [llength $methods]
- set savearg $dbargs
- for {set i 0} {$i < $nsubdbs} { incr i } {
- set m [lindex $methods [expr $i % $mlen]]
- set dbargs $savearg
- subdb_build $dbname $nentries [lindex $dups $i] \
- $i $m $psize sub$i.db $dbargs
- }
-}
-
-proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
- source ./include.tcl
-
- set dbargs [convert_args $method $dbargs]
- set omethod [convert_method $method]
-
- puts "Method: $method"
-
- set txnenv 0
- set eindex [lsearch -exact $dbargs "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $dbargs $eindex]
- set txnenv [is_txnenv $env]
- }
- # Create the database and open the dictionary
- set oflags "-create -mode 0644 $omethod \
- -pagesize $psize $dbargs $name $subdb"
- set db [eval {berkdb_open} $oflags]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
- set count 0
- if { $ndups >= 0 } {
- puts "\tBuilding $method $name $subdb. \
- $nkeys keys with $ndups duplicates at interval of $dup_interval"
- }
- if { $ndups < 0 } {
- puts "\tBuilding $method $name $subdb. \
- $nkeys unique keys of pagesize $psize"
- #
- # If ndups is < 0, we want unique keys in each subdb,
- # so skip ahead in the dict by nkeys * iteration
- #
- for { set count 0 } \
- { $count < [expr $nkeys * $dup_interval] } {
- incr count} {
- set ret [gets $did str]
- if { $ret == -1 } {
- break
- }
- }
- }
- set txn ""
- for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
- incr count} {
- for { set i 0 } { $i < $ndups } { incr i } {
- set data [format "%04d" [expr $i * $dup_interval]]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$str \
- [chop_data $method $data]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- if { $ndups == 0 } {
- set ret [eval {$db put} $txn {$str \
- [chop_data $method NODUP]}]
- error_check_good put $ret 0
- } elseif { $ndups < 0 } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set num [expr $nkeys * $dup_interval]
- set num [expr $num + $count + 1]
- set ret [eval {$db put} $txn {$num \
- [chop_data $method $str]}]
- set kvals($num) [pad_data $method $str]
- error_check_good put $ret 0
- } else {
- set ret [eval {$db put} $txn \
- {$str [chop_data $method $str]}]
- error_check_good put $ret 0
- }
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- close $did
- error_check_good close:$name [$db close] 0
-}
-
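-# Example (hypothetical names/sizes): build five 4K-page btree subdbs,
-# sub0.db .. sub4.db, each holding 100 unique keys -- a dup count of
-# -1 selects the unique-key path in subdb_build:
-#	build_all_subdb test.db {-btree} 4096 {-1 -1 -1 -1 -1} 100
-#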
-proc do_join_subdb { db primary subdbs key oargs } {
- source ./include.tcl
-
- puts "\tJoining: $subdbs on $key"
-
- # Open all the databases
- set p [eval {berkdb_open -unknown} $oargs $db $primary]
- error_check_good "primary open" [is_valid_db $p] TRUE
-
- set dblist ""
- set curslist ""
-
- foreach i $subdbs {
- set jdb [eval {berkdb_open -unknown} $oargs $db sub$i.db]
- error_check_good "sub$i.db open" [is_valid_db $jdb] TRUE
-
- lappend jlist [list $jdb $key]
- lappend dblist $jdb
-
- }
-
- set join_res [eval {$p get_join} $jlist]
- set ndups [llength $join_res]
-
-	# Calculate how many dups we expect.
-	# We go through the list of indices.  If we find a 0, we expect
-	# 0 dups.  Otherwise we fold the indices together pairwise: if
-	# one divides the other, keep the larger; if not, keep their
-	# least common multiple.  The expected count is 50 divided by
-	# the final result.
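-	# For example, with subdbs {2 3} the indices fold to their lcm,
-	# 6, and we expect [expr 50 / 6] = 8 dups; with {2 4}, 2 divides
-	# 4, so we keep 4 and expect [expr 50 / 4] = 12.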
- set expected 50
- set last 1
- foreach n $subdbs {
- if { $n == 0 } {
- set expected 0
- break
- }
- if { $last == $n } {
- continue
- }
-
- if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
- if { $n > $last } {
- set last $n
- set expected [expr 50 / $last]
- }
- } else {
- set last [expr $n * $last / [gcd $n $last]]
- set expected [expr 50 / $last]
- }
- }
-
- error_check_good number_of_dups:$subdbs $ndups $expected
-
- #
- # If we get here, we have the number expected, now loop
- # through each and see if it is what we expected.
- #
- for { set i 0 } { $i < $ndups } { incr i } {
- set pair [lindex $join_res $i]
- set k [lindex $pair 0]
- foreach j $subdbs {
- error_check_bad valid_dup:$j:$subdbs $j 0
- set kval [string trimleft $k 0]
- if { [string length $kval] == 0 } {
- set kval 0
- }
- error_check_good \
- valid_dup:$j:$subdbs [expr $kval % $j] 0
- }
- }
-
- error_check_good close_primary [$p close] 0
- foreach i $dblist {
- error_check_good close_index:$i [$i close] 0
- }
-}
-
-proc n_to_subname { n } {
- if { $n == 0 } {
-		return null.db
-	} else {
-		return sub$n.db
- }
-}
diff --git a/storage/bdb/test/sec001.tcl b/storage/bdb/test/sec001.tcl
deleted file mode 100644
index ed2ca81f9d4..00000000000
--- a/storage/bdb/test/sec001.tcl
+++ /dev/null
@@ -1,223 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sec001.tcl,v 11.12 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST sec001
-# TEST Test of security interface
-proc sec001 { } {
- global errorInfo
- global errorCode
- global has_crypto
- global is_hp_test
-
- source ./include.tcl
- # Skip test if release does not support encryption.
- if { $has_crypto == 0 } {
- puts "Skipping test sec001 for non-crypto release."
- return
- }
-
- set testfile1 env1.db
- set testfile2 $testdir/env2.db
- set subdb1 sub1
- set subdb2 sub2
-
- puts "Sec001: Test of basic encryption interface."
- env_cleanup $testdir
-
- set passwd1 "passwd1"
- set passwd1_bad "passwd1_bad"
- set passwd2 "passwd2"
- set key "key"
- set data "data"
-
- #
- # This first group tests bad create scenarios and also
- # tests attempting to use encryption after creating a
- # non-encrypted env/db to begin with.
- #
- set nopass ""
- puts "\tSec001.a.1: Create db with encryption."
- set db [berkdb_open -create -encryptaes $passwd1 -btree $testfile2]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\tSec001.a.2: Open db without encryption."
- set stat [catch {berkdb_open_noerr $testfile2} ret]
- error_check_good db:nocrypto $stat 1
- error_check_good db:fail [is_substr $ret "no encryption key"] 1
-
- set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
-
- puts "\tSec001.b.1: Create db without encryption or checksum."
- set db [berkdb_open -create -btree $testfile2]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\tSec001.b.2: Open db with encryption."
- set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
- error_check_good db:nocrypto $stat 1
- error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
-
- set ret [berkdb dbremove $testfile2]
-
- puts "\tSec001.c.1: Create db with checksum."
- set db [berkdb_open -create -chksum -btree $testfile2]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\tSec001.c.2: Open db with encryption."
- set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
- error_check_good db:nocrypto $stat 1
- error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
-
- set ret [berkdb dbremove $testfile2]
-
- puts "\tSec001.d.1: Create subdb with encryption."
- set db [berkdb_open -create -encryptaes $passwd1 -btree \
- $testfile2 $subdb1]
- error_check_good subdb [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\tSec001.d.2: Create 2nd subdb without encryption."
- set stat [catch {berkdb_open_noerr -create -btree \
- $testfile2 $subdb2} ret]
- error_check_good subdb:nocrypto $stat 1
- error_check_good subdb:fail [is_substr $ret "no encryption key"] 1
-
- set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
-
- puts "\tSec001.e.1: Create subdb without encryption or checksum."
- set db [berkdb_open -create -btree $testfile2 $subdb1]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\tSec001.e.2: Create 2nd subdb with encryption."
- set stat [catch {berkdb_open_noerr -create -btree -encryptaes $passwd1 \
- $testfile2 $subdb2} ret]
- error_check_good subdb:nocrypto $stat 1
- error_check_good subdb:fail [is_substr $ret "supplied encryption key"] 1
-
- env_cleanup $testdir
-
- puts "\tSec001.f.1: Open env with encryption, empty passwd."
- set stat [catch {berkdb_env_noerr -create -home $testdir \
- -encryptaes $nopass} ret]
- error_check_good env:nopass $stat 1
- error_check_good env:fail [is_substr $ret "Empty password"] 1
-
- puts "\tSec001.f.2: Create without encryption algorithm (DB_ENCRYPT_ANY)."
- set stat [catch {berkdb_env_noerr -create -home $testdir \
- -encryptany $passwd1} ret]
- error_check_good env:any $stat 1
- error_check_good env:fail [is_substr $ret "algorithm not supplied"] 1
-
- puts "\tSec001.f.3: Create without encryption."
- set env [berkdb_env -create -home $testdir]
- error_check_good env [is_valid_env $env] TRUE
-
- # Skip this piece of the test on HP-UX, where we can't
- # join the env.
- if { $is_hp_test != 1 } {
- puts "\tSec001.f.4: Open again with encryption."
- set stat [catch {berkdb_env_noerr -home $testdir \
- -encryptaes $passwd1} ret]
- error_check_good env:unencrypted $stat 1
- error_check_good env:fail [is_substr $ret \
- "Joining non-encrypted environment"] 1
- }
-
- error_check_good envclose [$env close] 0
-
- env_cleanup $testdir
-
- #
- # This second group tests creating and opening a secure env.
-	# We test that others can join successfully, and that others with
-	# bad/no passwords cannot.  Also test that we cannot use the
- # db->set_encrypt method when we've already got a secure dbenv.
- #
- puts "\tSec001.g.1: Open with encryption."
- set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1]
- error_check_good env [is_valid_env $env] TRUE
-
- # We can't open an env twice in HP-UX, so skip the rest.
- if { $is_hp_test == 1 } {
- puts "Skipping remainder of test for HP-UX."
- error_check_good env_close [$env close] 0
- return
- }
-
- puts "\tSec001.g.2: Open again with encryption - same passwd."
- set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
- error_check_good env [is_valid_env $env1] TRUE
- error_check_good envclose [$env1 close] 0
-
- puts "\tSec001.g.3: Open again with any encryption (DB_ENCRYPT_ANY)."
- set env1 [berkdb_env -home $testdir -encryptany $passwd1]
- error_check_good env [is_valid_env $env1] TRUE
- error_check_good envclose [$env1 close] 0
-
- puts "\tSec001.g.4: Open with encryption - different length passwd."
- set stat [catch {berkdb_env_noerr -home $testdir \
- -encryptaes $passwd1_bad} ret]
- error_check_good env:$passwd1_bad $stat 1
- error_check_good env:fail [is_substr $ret "Invalid password"] 1
-
- puts "\tSec001.g.5: Open with encryption - different passwd."
- set stat [catch {berkdb_env_noerr -home $testdir \
- -encryptaes $passwd2} ret]
- error_check_good env:$passwd2 $stat 1
- error_check_good env:fail [is_substr $ret "Invalid password"] 1
-
- puts "\tSec001.g.6: Open env without encryption."
- set stat [catch {berkdb_env_noerr -home $testdir} ret]
- error_check_good env:$passwd2 $stat 1
- error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
-
- puts "\tSec001.g.7: Open database with encryption in env"
- set stat [catch {berkdb_open_noerr -env $env -btree -create \
- -encryptaes $passwd2 $testfile1} ret]
- error_check_good db:$passwd2 $stat 1
- error_check_good env:fail [is_substr $ret "method not permitted"] 1
-
- puts "\tSec001.g.8: Close creating env"
- error_check_good envclose [$env close] 0
-
- #
- # This third group tests opening the env after the original env
- # handle is closed. Just to make sure we can reopen it in
- # the right fashion even if no handles are currently open.
- #
- puts "\tSec001.h.1: Reopen without encryption."
- set stat [catch {berkdb_env_noerr -home $testdir} ret]
- error_check_good env:noencrypt $stat 1
- error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
-
- puts "\tSec001.h.2: Reopen with bad passwd."
- set stat [catch {berkdb_env_noerr -home $testdir -encryptaes \
- $passwd1_bad} ret]
- error_check_good env:$passwd1_bad $stat 1
- error_check_good env:fail [is_substr $ret "Invalid password"] 1
-
- puts "\tSec001.h.3: Reopen with encryption."
- set env [berkdb_env -create -home $testdir -encryptaes $passwd1]
- error_check_good env [is_valid_env $env] TRUE
-
- puts "\tSec001.h.4: 2nd Reopen with encryption."
- set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
- error_check_good env [is_valid_env $env1] TRUE
-
- error_check_good envclose [$env1 close] 0
- error_check_good envclose [$env close] 0
-
- puts "\tSec001 complete."
-}
diff --git a/storage/bdb/test/sec002.tcl b/storage/bdb/test/sec002.tcl
deleted file mode 100644
index 5bdd4af3bcc..00000000000
--- a/storage/bdb/test/sec002.tcl
+++ /dev/null
@@ -1,181 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sec002.tcl,v 11.13 2004/11/02 16:12:04 carol Exp $
-#
-# TEST sec002
-# TEST Test of security interface and catching errors in the
-# TEST face of attackers overwriting parts of existing files.
-proc sec002 { } {
- global errorInfo
- global errorCode
- global has_crypto
-
- source ./include.tcl
-
- # Skip test if release does not support encryption.
- if { $has_crypto == 0 } {
- puts "Skipping test sec002 for non-crypto release."
- return
- }
-
- set testfile1 $testdir/sec002-1.db
- set testfile2 $testdir/sec002-2.db
- set testfile3 $testdir/sec002-3.db
- set testfile4 $testdir/sec002-4.db
-
- puts "Sec002: Test of basic encryption interface."
- env_cleanup $testdir
-
- set passwd1 "passwd1"
- set passwd2 "passwd2"
- set key "key"
- set data "data"
- set pagesize 1024
-
- #
-	# Set up 4 databases: two encrypted, but with different passwords;
-	# one unencrypted, but with checksumming turned on; and one
-	# unencrypted with no checksumming.  Place the exact same data
-	# in each one.
- #
- puts "\tSec002.a: Setup databases"
- set db_cmd "-create -pagesize $pagesize -btree "
- set db [eval {berkdb_open} -encryptaes $passwd1 $db_cmd $testfile1]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- set db [eval {berkdb_open} -encryptaes $passwd2 $db_cmd $testfile2]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- set db [eval {berkdb_open} -chksum $db_cmd $testfile3]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- set db [eval {berkdb_open} $db_cmd $testfile4]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- #
- # If we reopen the normal file with the -chksum flag, there
- # should be no error and checksumming should be ignored.
- # If we reopen a checksummed file without the -chksum flag,
- # checksumming should still be in effect. [#6959]
- #
- puts "\tSec002.b: Inheritance of chksum properties"
- puts "\t\tSec002.b1: Reopen ordinary file with -chksum flag"
- set db [eval {berkdb_open} -chksum $testfile4]
- error_check_good open_with_chksum [is_valid_db $db] TRUE
- set retdata [$db get $key]
- error_check_good testfile4_get [lindex [lindex $retdata 0] 1] $data
- error_check_good dbclose [$db close] 0
-
- puts "\t\tSec002.b2: Reopen checksummed file without -chksum flag"
- set db [eval {berkdb_open} $testfile3]
- error_check_good open_wo_chksum [is_valid_db $db] TRUE
- set retdata [$db get $key]
- error_check_good testfile3_get [lindex [lindex $retdata 0] 1] $data
- error_check_good dbclose [$db close] 0
-
- #
-	# First just touch some bits in the file.  We know that in btree
-	# meta pages, bytes 92-459 are unused.  Scribble on them in the
-	# encrypted file and in both unencrypted files.  We should get a
-	# checksum error for the encrypted and checksummed files, and no
-	# error for the normal file.
- #
- set fidlist {}
- set fid [open $testfile1 r+]
- lappend fidlist $fid
- set fid [open $testfile3 r+]
- lappend fidlist $fid
- set fid [open $testfile4 r+]
- lappend fidlist $fid
-
- puts "\tSec002.c: Overwrite unused space in meta-page"
- foreach f $fidlist {
- fconfigure $f -translation binary
- seek $f 100 start
- set byte [read $f 1]
- binary scan $byte c val
- set newval [expr ~$val]
- set newbyte [binary format c $newval]
- seek $f 100 start
- puts -nonewline $f $newbyte
- close $f
- }
- puts "\tSec002.d: Reopen modified databases"
- set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile1} ret]
- error_check_good db:$testfile1 $stat 1
- error_check_good db:$testfile1:fail \
- [is_substr $ret "metadata page checksum error"] 1
-
- set stat [catch {berkdb_open_noerr -chksum $testfile3} ret]
- error_check_good db:$testfile3 $stat 1
- error_check_good db:$testfile3:fail \
- [is_substr $ret "metadata page checksum error"] 1
-
- set stat [catch {berkdb_open_noerr $testfile4} db]
- error_check_good db:$testfile4 $stat 0
- error_check_good dbclose [$db close] 0
-
- # Skip the remainder of the test for Windows platforms.
- # Forcing the error which causes DB_RUNRECOVERY to be
- # returned ends up leaving open files that cannot be removed.
- if { $is_windows_test == 1 } {
- cleanup $testdir NULL 1
- puts "Skipping remainder of test for Windows"
- return
- }
-
- puts "\tSec002.e: Replace root page in encrypted w/ encrypted"
- set fid1 [open $testfile1 r+]
- fconfigure $fid1 -translation binary
- set fid2 [open $testfile2 r+]
- fconfigure $fid2 -translation binary
- seek $fid1 $pagesize start
- seek $fid2 $pagesize start
- fcopy $fid1 $fid2 -size $pagesize
- close $fid1
- close $fid2
-
- set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
- error_check_good db [is_valid_db $db] TRUE
- set stat [catch {$db get $key} ret]
- error_check_good dbget $stat 1
- error_check_good db:$testfile2:fail1 \
- [is_substr $ret "checksum error"] 1
- set stat [catch {$db close} ret]
- error_check_good dbclose $stat 1
- error_check_good db:$testfile2:fail2 [is_substr $ret "DB_RUNRECOVERY"] 1
-
- puts "\tSec002.f: Replace root page in encrypted w/ unencrypted"
- set fid2 [open $testfile2 r+]
- fconfigure $fid2 -translation binary
- set fid4 [open $testfile4 r+]
- fconfigure $fid4 -translation binary
- seek $fid2 $pagesize start
- seek $fid4 $pagesize start
- fcopy $fid4 $fid2 -size $pagesize
- close $fid4
- close $fid2
-
- set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
- error_check_good db [is_valid_db $db] TRUE
- set stat [catch {$db get $key} ret]
- error_check_good dbget $stat 1
-	error_check_good db:$testfile2:fail1 \
-	    [is_substr $ret "checksum error"] 1
-	set stat [catch {$db close} ret]
-	error_check_good dbclose $stat 1
-	error_check_good db:$testfile2:fail2 [is_substr $ret "DB_RUNRECOVERY"] 1
-
- cleanup $testdir NULL 1
-}
diff --git a/storage/bdb/test/shelltest.tcl b/storage/bdb/test/shelltest.tcl
deleted file mode 100644
index cb588f13378..00000000000
--- a/storage/bdb/test/shelltest.tcl
+++ /dev/null
@@ -1,101 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: shelltest.tcl,v 1.31 2004/03/15 21:09:49 bostic Exp $
-#
-# TEST scr###
-# TEST The scr### directories are shell scripts that test a variety of
-# TEST things, including things about the distribution itself. These
-# TEST tests won't run on most systems, so don't even try to run them.
-#
-# shelltest.tcl:
-# Code to run shell script tests, to incorporate Java, C++,
-# example compilation, etc. test scripts into the Tcl framework.
-proc shelltest {{ run_one 0 } { xml 0 }} {
- source ./include.tcl
- global shelltest_list
- global xmlshelltest_list
-
- set SH /bin/sh
- if { [file executable $SH] != 1 } {
- puts "Shell tests require valid shell /bin/sh: not found."
- puts "Skipping shell tests."
- return 0
- }
-
- if { $xml == 1 } {
- set shelltest_list $xmlshelltest_list
- }
-
- if { $run_one == 0 } {
- puts "Running shell script tests..."
-
- foreach testpair $shelltest_list {
- set dir [lindex $testpair 0]
- set test [lindex $testpair 1]
-
- env_cleanup $testdir
- shelltest_copy $test_path/$dir $testdir
- shelltest_run $SH $dir $test $testdir
- }
- } else {
-		set run_one [expr $run_one - 1]
- set dir [lindex [lindex $shelltest_list $run_one] 0]
- set test [lindex [lindex $shelltest_list $run_one] 1]
-
- env_cleanup $testdir
- shelltest_copy $test_path/$dir $testdir
- shelltest_run $SH $dir $test $testdir
- }
-}
-
-proc shelltest_copy { fromdir todir } {
- set globall [glob $fromdir/*]
-
- foreach f $globall {
- file copy $f $todir/
- }
-}
-
-proc shelltest_run { sh srcdir test testdir } {
- puts "Running shell script $srcdir ($test)..."
-
- set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res]
-
- if { $ret != 0 } {
- puts "FAIL: shell test $srcdir/$test exited abnormally"
- }
-}
-
-proc scr001 {} { shelltest 1 }
-proc scr002 {} { shelltest 2 }
-proc scr003 {} { shelltest 3 }
-proc scr004 {} { shelltest 4 }
-proc scr005 {} { shelltest 5 }
-proc scr006 {} { shelltest 6 }
-proc scr007 {} { shelltest 7 }
-proc scr008 {} { shelltest 8 }
-proc scr009 {} { shelltest 9 }
-proc scr010 {} { shelltest 10 }
-proc scr011 {} { shelltest 11 }
-proc scr012 {} { shelltest 12 }
-proc scr013 {} { shelltest 13 }
-proc scr014 {} { shelltest 14 }
-proc scr015 {} { shelltest 15 }
-proc scr016 {} { shelltest 16 }
-proc scr017 {} { shelltest 17 }
-proc scr018 {} { shelltest 18 }
-proc scr019 {} { shelltest 19 }
-proc scr020 {} { shelltest 20 }
-proc scr021 {} { shelltest 21 }
-proc scr022 {} { shelltest 22 }
-proc scr023 {} { shelltest 23 }
-proc scr024 {} { shelltest 24 }
-proc scr025 {} { shelltest 25 }
-proc scr026 {} { shelltest 26 }
-proc scr027 {} { shelltest 27 }
-proc scr028 {} { shelltest 28 }
-proc scr029 {} { shelltest 29 }
-proc scr030 {} { shelltest 30 }
diff --git a/storage/bdb/test/si001.tcl b/storage/bdb/test/si001.tcl
deleted file mode 100644
index e5cffc0e11a..00000000000
--- a/storage/bdb/test/si001.tcl
+++ /dev/null
@@ -1,178 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: si001.tcl,v 1.18 2004/10/27 20:40:25 carol Exp $
-#
-# TEST si001
-# TEST Basic secondary index put/delete test
-# TEST
-# TEST Put data in primary db and check that pget on secondary
-# TEST index finds the right entries. Alter the primary in the
-# TEST following ways, checking for correct data each time:
-# TEST Overwrite data in primary database.
-# TEST Delete half of entries through primary.
-# TEST Delete half of remaining entries through secondary.
-# TEST Append data (for record-based primaries only).
-proc si001 { methods {nentries 200} {tnum "001"} args } {
- source ./include.tcl
- global dict nsecondaries
-
- # Primary method/args.
- set pmethod [lindex $methods 0]
- set pargs [convert_args $pmethod $args]
- set pomethod [convert_method $pmethod]
-
- # Method/args for all the secondaries. If only one method
- # was specified, assume the same method and a standard N
- # secondaries.
- set methods [lrange $methods 1 end]
- if { [llength $methods] == 0 } {
- for { set i 0 } { $i < $nsecondaries } { incr i } {
- lappend methods $pmethod
- }
- }
-
- set argses [convert_argses $methods $args]
- set omethods [convert_methods $methods]
-
- puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
- env_cleanup $testdir
-
- set pname "primary$tnum.db"
- set snamebase "secondary$tnum"
-
- # Open an environment
- # XXX if one is not supplied!
- set env [berkdb_env -create -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- # Open the primary.
- set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
- error_check_good primary_open [is_valid_db $pdb] TRUE
-
- # Open and associate the secondaries
- set sdbs {}
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -env} $env \
- [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
-
- puts "\tSi$tnum.a: Put loop"
- # Open dictionary and leave open until done with test .e so append
- # won't require configuration for duplicates.
- set did [open $dict]
- for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
- if { [is_record_based $pmethod] == 1 } {
- set key [expr $n + 1]
- set datum $str
- } else {
- set key $str
- gets $did datum
- }
- set keys($n) $key
- set data($n) [pad_data $pmethod $datum]
-
- set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
- error_check_good put($n) $ret 0
- }
-
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
-
- puts "\tSi$tnum.b: Put/overwrite loop"
- for { set n 0 } { $n < $nentries } { incr n } {
- set newd $data($n).$keys($n)
- set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
- error_check_good put_overwrite($n) $ret 0
- set data($n) [pad_data $pmethod $newd]
- }
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b"
-
- # Delete the second half of the entries through the primary.
- # We do the second half so we can just pass keys(0 ... n/2)
- # to check_secondaries.
- set half [expr $nentries / 2]
- puts "\tSi$tnum.c: Primary delete loop: deleting $half entries"
- for { set n $half } { $n < $nentries } { incr n } {
- set ret [$pdb del $keys($n)]
- error_check_good pdel($n) $ret 0
- }
- check_secondaries $pdb $sdbs $half keys data "Si$tnum.c"
-
- # Delete half of what's left, through the first secondary.
- set quar [expr $half / 2]
- puts "\tSi$tnum.d: Secondary delete loop: deleting $quar entries"
- set sdb [lindex $sdbs 0]
- set callback [callback_n 0]
- for { set n $quar } { $n < $half } { incr n } {
- set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
- set ret [$sdb del $skey]
- error_check_good sdel($n) $ret 0
- }
- check_secondaries $pdb $sdbs $quar keys data "Si$tnum.d"
- set left $quar
-
- # For queue and recno only, test append, adding back a quarter of
- # the original number of entries.
- if { [is_record_based $pmethod] == 1 } {
- puts "\tSi$tnum.e: Append loop: append $quar entries"
- for { set n $quar } { [gets $did str] != -1 && $n < $half } \
- { incr n } {
- set key [expr $n + 1]
- set datum $str
- set keys($n) $key
- set data($n) [pad_data $pmethod $datum]
-
- set ret [eval {$pdb put} \
- {$key [chop_data $pmethod $datum]}]
- error_check_good put($n) $ret 0
- }
- check_secondaries $pdb $sdbs $half keys data "Si$tnum.e"
- set left $half
- }
-
- close $did
-
- puts "\tSi$tnum.f: Truncate primary, check secondaries are empty."
- error_check_good truncate [$pdb truncate] $left
- foreach sdb $sdbs {
- set scursor [$sdb cursor]
- error_check_good db_cursor [is_substr $scursor $sdb] 1
- set ret [$scursor get -first]
- error_check_good sec_empty [string length $ret] 0
- error_check_good cursor_close [$scursor close] 0
- }
-
- puts "\tSi$tnum.g: Closing/disassociating primary first"
- error_check_good primary_close [$pdb close] 0
- foreach sdb $sdbs {
- error_check_good secondary_close [$sdb close] 0
- }
- error_check_good env_close [$env close] 0
-
- # Reopen with _noerr for test of truncate secondary.
- puts "\tSi$tnum.h: Truncate secondary (should fail)"
-
- set env [berkdb_env_noerr -create -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set pdb [eval {berkdb_open_noerr -create -env} $env \
- $pomethod $pargs $pname]
- set sdb [eval {berkdb_open_noerr -create -env} $env \
- [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db ]
- $pdb associate [callback_n 0] $sdb
-
- set ret [catch {$sdb truncate} ret]
- error_check_good trunc_secondary $ret 1
-
- error_check_good primary_close [$pdb close] 0
- error_check_good secondary_close [$sdb close] 0
- error_check_good env_close [$env close] 0
-}
diff --git a/storage/bdb/test/si002.tcl b/storage/bdb/test/si002.tcl
deleted file mode 100644
index 9e38c8a98e6..00000000000
--- a/storage/bdb/test/si002.tcl
+++ /dev/null
@@ -1,175 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: si002.tcl,v 1.14 2004/10/27 20:40:25 carol Exp $
-#
-# TEST si002
-# TEST Basic cursor-based secondary index put/delete test
-# TEST
-# TEST Cursor put data in primary db and check that pget
-# TEST on secondary index finds the right entries.
-# TEST Overwrite while walking primary, check pget again.
-# TEST Overwrite while walking secondary (use c_pget), check
-# TEST pget again.
-# TEST Cursor delete half of entries through primary, check.
-# TEST Cursor delete half of remainder through secondary, check.
-proc si002 { methods {nentries 200} {tnum "002"} args } {
- source ./include.tcl
- global dict nsecondaries
-
- # Primary method/args.
- set pmethod [lindex $methods 0]
- set pargs [convert_args $pmethod $args]
- set pomethod [convert_method $pmethod]
-
- # Method/args for all the secondaries. If only one method
- # was specified, assume the same method and a standard N
- # secondaries.
- set methods [lrange $methods 1 end]
- if { [llength $methods] == 0 } {
- for { set i 0 } { $i < $nsecondaries } { incr i } {
- lappend methods $pmethod
- }
- }
-
- set argses [convert_argses $methods $args]
- set omethods [convert_methods $methods]
-
- puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
- env_cleanup $testdir
-
- set pname "primary$tnum.db"
- set snamebase "secondary$tnum"
-
- # Open an environment
- # XXX if one is not supplied!
- set env [berkdb_env -create -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- # Open the primary.
- set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
- error_check_good primary_open [is_valid_db $pdb] TRUE
-
- # Open and associate the secondaries
- set sdbs {}
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -env} $env \
- [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
-
- puts "\tSi$tnum.a: Cursor put (-keyfirst/-keylast) loop"
- set did [open $dict]
- set pdbc [$pdb cursor]
- error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
- for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
- if { [is_record_based $pmethod] == 1 } {
- set key [expr $n + 1]
- set datum $str
- } else {
- set key $str
- gets $did datum
- }
- set ns($key) $n
- set keys($n) $key
- set data($n) [pad_data $pmethod $datum]
-
- if { $n % 2 == 0 } {
- set pflag " -keyfirst "
- } else {
- set pflag " -keylast "
- }
-
- set ret [eval {$pdbc put} $pflag \
- {$key [chop_data $pmethod $datum]}]
- error_check_good put($n) $ret 0
- }
- close $did
- error_check_good pdbc_close [$pdbc close] 0
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
-
- puts "\tSi$tnum.b: Cursor put overwrite (-current) loop"
- set pdbc [$pdb cursor]
- error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
- for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
- { set dbt [$pdbc get -next] } {
- set key [lindex [lindex $dbt 0] 0]
- set datum [lindex [lindex $dbt 0] 1]
- set newd $datum.$key
- set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
- error_check_good put_overwrite($key) $ret 0
- set data($ns($key)) [pad_data $pmethod $newd]
- }
- error_check_good pdbc_close [$pdbc close] 0
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b"
-
- puts "\tSi$tnum.c: Secondary c_pget/primary put overwrite loop"
- # We walk the first secondary, then put-overwrite each primary key/data
- # pair we find. This doubles as a DBC->c_pget test.
- set sdb [lindex $sdbs 0]
- set sdbc [$sdb cursor]
- error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
- for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
- { set dbt [$sdbc pget -next] } {
- set pkey [lindex [lindex $dbt 0] 1]
- set pdatum [lindex [lindex $dbt 0] 2]
-
- # Extended entries will be showing up underneath us, in
- # unpredictable places. Keep track of which pkeys
- # we've extended, and don't extend them repeatedly.
- if { [info exists pkeys_done($pkey)] == 1 } {
- continue
- } else {
- set pkeys_done($pkey) 1
- }
-
- set newd $pdatum.[string range $pdatum 0 2]
- set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
- error_check_good pdb_put($pkey) $ret 0
- set data($ns($pkey)) [pad_data $pmethod $newd]
- }
- error_check_good sdbc_close [$sdbc close] 0
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.c"
-
- # Delete the second half of the entries through the primary.
- # We do the second half so we can just pass keys(0 ... n/2)
- # to check_secondaries.
- set half [expr $nentries / 2]
- puts "\tSi$tnum.d:\
- Primary cursor delete loop: deleting $half entries"
- set pdbc [$pdb cursor]
- error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
- set dbt [$pdbc get -first]
- for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
- error_check_good pdbc_del [$pdbc del] 0
- set dbt [$pdbc get -next]
- }
- error_check_good pdbc_close [$pdbc close] 0
- cursor_check_secondaries $pdb $sdbs $half "Si$tnum.d"
-
- # Delete half of what's left, through the first secondary.
- set quar [expr $half / 2]
- puts "\tSi$tnum.e:\
- Secondary cursor delete loop: deleting $quar entries"
- set sdb [lindex $sdbs 0]
- set sdbc [$sdb cursor]
- set dbt [$sdbc get -first]
- for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
- error_check_good sdbc_del [$sdbc del] 0
- set dbt [$sdbc get -next]
- }
- error_check_good sdbc_close [$sdbc close] 0
- cursor_check_secondaries $pdb $sdbs $quar "Si$tnum.e"
-
- foreach sdb $sdbs {
- error_check_good secondary_close [$sdb close] 0
- }
- error_check_good primary_close [$pdb close] 0
- error_check_good env_close [$env close] 0
-}
diff --git a/storage/bdb/test/si003.tcl b/storage/bdb/test/si003.tcl
deleted file mode 100644
index 60ddba6a35f..00000000000
--- a/storage/bdb/test/si003.tcl
+++ /dev/null
@@ -1,148 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: si003.tcl,v 1.12 2004/10/27 20:40:25 carol Exp $
-#
-# TEST si003
-# TEST si001 with secondaries created and closed mid-test
-# TEST Basic secondary index put/delete test with secondaries
-# TEST created mid-test.
-proc si003 { methods {nentries 200} {tnum "003"} args } {
- source ./include.tcl
- global dict nsecondaries
-
- # There's no reason to run this test on large lists.
- if { $nentries > 1000 } {
- puts "Skipping si003 for large lists (over 1000 items)"
- return
- }
-
- # Primary method/args.
- set pmethod [lindex $methods 0]
- set pargs [convert_args $pmethod $args]
- set pomethod [convert_method $pmethod]
-
- # Method/args for all the secondaries. If only one method
- # was specified, assume the same method and a standard N
- # secondaries.
- set methods [lrange $methods 1 end]
- if { [llength $methods] == 0 } {
- for { set i 0 } { $i < $nsecondaries } { incr i } {
- lappend methods $pmethod
- }
- }
-
- set argses [convert_argses $methods $args]
- set omethods [convert_methods $methods]
-
- puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
- env_cleanup $testdir
-
- set pname "primary$tnum.db"
- set snamebase "secondary$tnum"
-
- # Open an environment
- # XXX if one is not supplied!
- set env [eval {berkdb_env -create -home $testdir}]
- error_check_good env_open [is_valid_env $env] TRUE
-
- # Open the primary.
- set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
- error_check_good primary_open [is_valid_db $pdb] TRUE
-
- puts -nonewline "\tSi$tnum.a: Put loop ... "
- set did [open $dict]
- for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
- if { [is_record_based $pmethod] == 1 } {
- set key [expr $n + 1]
- set datum $str
- } else {
- set key $str
- gets $did datum
- }
- set keys($n) $key
- set data($n) [pad_data $pmethod $datum]
-
- set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
- error_check_good put($n) $ret 0
- }
- close $did
-
- # Open and associate the secondaries
- set sdbs {}
- puts "opening secondaries."
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -env} $env \
- [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate -create [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
-
- puts -nonewline "\tSi$tnum.b: Put/overwrite loop ... "
- for { set n 0 } { $n < $nentries } { incr n } {
- set newd $data($n).$keys($n)
- set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
- error_check_good put_overwrite($n) $ret 0
- set data($n) [pad_data $pmethod $newd]
- }
-
- # Close the secondaries again.
- puts "closing secondaries."
- for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
- { set sdb [lindex $sdbs end] } {
- error_check_good second_close($sdb) [$sdb close] 0
- set sdbs [lrange $sdbs 0 end-1]
- check_secondaries \
- $pdb $sdbs $nentries keys data "Si$tnum.b"
- }
-
- # Delete the second half of the entries through the primary.
- # We do the second half so we can just pass keys(0 ... n/2)
- # to check_secondaries.
- set half [expr $nentries / 2]
- puts -nonewline \
- "\tSi$tnum.c: Primary delete loop: deleting $half entries ..."
- for { set n $half } { $n < $nentries } { incr n } {
- set ret [$pdb del $keys($n)]
- error_check_good pdel($n) $ret 0
- }
-
- # Open and associate the secondaries
- set sdbs {}
- puts "\n\t\topening secondaries."
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -env} $env \
- [lindex $omethods $i] [lindex $argses $i] \
- $snamebase.r2.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate -create [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
- check_secondaries $pdb $sdbs $half keys data "Si$tnum.c"
-
- # Delete half of what's left, through the first secondary.
- set quar [expr $half / 2]
- puts "\tSi$tnum.d: Secondary delete loop: deleting $quar entries"
- set sdb [lindex $sdbs 0]
- set callback [callback_n 0]
- for { set n $quar } { $n < $half } { incr n } {
- set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
- set ret [$sdb del $skey]
- error_check_good sdel($n) $ret 0
- }
- check_secondaries $pdb $sdbs $quar keys data "Si$tnum.d"
-
- foreach sdb $sdbs {
- error_check_good secondary_close [$sdb close] 0
- }
- error_check_good primary_close [$pdb close] 0
- error_check_good env_close [$env close] 0
-}
diff --git a/storage/bdb/test/si004.tcl b/storage/bdb/test/si004.tcl
deleted file mode 100644
index 08af8b261ce..00000000000
--- a/storage/bdb/test/si004.tcl
+++ /dev/null
@@ -1,200 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: si004.tcl,v 1.12 2004/10/27 20:40:25 carol Exp $
-#
-# TEST si004
-# TEST si002 with secondaries created and closed mid-test
-# TEST Basic cursor-based secondary index put/delete test, with
-# TEST secondaries created mid-test.
-proc si004 { methods {nentries 200} {tnum "004"} args } {
- source ./include.tcl
- global dict nsecondaries
-
- # There's no reason to run this test on large lists.
- if { $nentries > 1000 } {
- puts "Skipping si004 for large lists (over 1000 items)."
- return
- }
-
- # Primary method/args.
- set pmethod [lindex $methods 0]
- set pargs [convert_args $pmethod $args]
- set pomethod [convert_method $pmethod]
-
- # Method/args for all the secondaries. If only one method
- # was specified, assume the same method and a standard N
- # secondaries.
- set methods [lrange $methods 1 end]
- if { [llength $methods] == 0 } {
- for { set i 0 } { $i < $nsecondaries } { incr i } {
- lappend methods $pmethod
- }
- }
-
- set argses [convert_argses $methods $args]
- set omethods [convert_methods $methods]
-
- puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
- env_cleanup $testdir
-
- set pname "primary$tnum.db"
- set snamebase "secondary$tnum"
-
- # Open an environment
- # XXX if one is not supplied!
- set env [berkdb_env -create -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- # Open the primary.
- set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
- error_check_good primary_open [is_valid_db $pdb] TRUE
-
- puts -nonewline \
- "\tSi$tnum.a: Cursor put (-keyfirst/-keylast) loop ... "
- set did [open $dict]
- set pdbc [$pdb cursor]
- error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
- for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
- if { [is_record_based $pmethod] == 1 } {
- set key [expr $n + 1]
- set datum $str
- } else {
- set key $str
- gets $did datum
- }
- set ns($key) $n
- set keys($n) $key
- set data($n) [pad_data $pmethod $datum]
-
- if { $n % 2 == 0 } {
- set pflag " -keyfirst "
- } else {
- set pflag " -keylast "
- }
-
- set ret [eval {$pdbc put} $pflag \
- {$key [chop_data $pmethod $datum]}]
- error_check_good put($n) $ret 0
- }
- close $did
- error_check_good pdbc_close [$pdbc close] 0
-
- # Open and associate the secondaries
- set sdbs {}
- puts "\n\t\topening secondaries."
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -env} $env \
- [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate -create [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
-
- puts "\tSi$tnum.b: Cursor put overwrite (-current) loop"
- set pdbc [$pdb cursor]
- error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
- for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
- { set dbt [$pdbc get -next] } {
- set key [lindex [lindex $dbt 0] 0]
- set datum [lindex [lindex $dbt 0] 1]
- set newd $datum.$key
- set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
- error_check_good put_overwrite($key) $ret 0
- set data($ns($key)) [pad_data $pmethod $newd]
- }
- error_check_good pdbc_close [$pdbc close] 0
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b"
-
- puts -nonewline "\tSi$tnum.c:\
- Secondary c_pget/primary put overwrite loop ... "
- # We walk the first secondary, then put-overwrite each primary key/data
- # pair we find. This doubles as a DBC->c_pget test.
- set sdb [lindex $sdbs 0]
- set sdbc [$sdb cursor]
- error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
- for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
- { set dbt [$sdbc pget -next] } {
- set pkey [lindex [lindex $dbt 0] 1]
- set pdatum [lindex [lindex $dbt 0] 2]
-
- # Extended entries will be showing up underneath us, in
- # unpredictable places. Keep track of which pkeys
- # we've extended, and don't extend them repeatedly.
- if { [info exists pkeys_done($pkey)] == 1 } {
- continue
- } else {
- set pkeys_done($pkey) 1
- }
-
- set newd $pdatum.[string range $pdatum 0 2]
- set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
- error_check_good pdb_put($pkey) $ret 0
- set data($ns($pkey)) [pad_data $pmethod $newd]
- }
- error_check_good sdbc_close [$sdbc close] 0
-
- # Close the secondaries again.
- puts "\n\t\tclosing secondaries."
- for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
- { set sdb [lindex $sdbs end] } {
- error_check_good second_close($sdb) [$sdb close] 0
- set sdbs [lrange $sdbs 0 end-1]
- check_secondaries \
- $pdb $sdbs $nentries keys data "Si$tnum.c"
- }
-
- # Delete the second half of the entries through the primary.
- # We do the second half so we can just pass keys(0 ... n/2)
- # to check_secondaries.
- set half [expr $nentries / 2]
- puts -nonewline "\tSi$tnum.d:\
- Primary cursor delete loop: deleting $half entries ... "
- set pdbc [$pdb cursor]
- error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
- set dbt [$pdbc get -first]
- for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
- error_check_good pdbc_del [$pdbc del] 0
- set dbt [$pdbc get -next]
- }
- error_check_good pdbc_close [$pdbc close] 0
-
- set sdbs {}
- puts "\n\t\topening secondaries."
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -env} $env \
- [lindex $omethods $i] [lindex $argses $i] \
- $snamebase.r2.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate -create [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
- cursor_check_secondaries $pdb $sdbs $half "Si$tnum.d"
-
- # Delete half of what's left, through the first secondary.
- set quar [expr $half / 2]
- puts "\tSi$tnum.e:\
- Secondary cursor delete loop: deleting $quar entries"
- set sdb [lindex $sdbs 0]
- set sdbc [$sdb cursor]
- set dbt [$sdbc get -first]
- for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
- error_check_good sdbc_del [$sdbc del] 0
- set dbt [$sdbc get -next]
- }
- error_check_good sdbc_close [$sdbc close] 0
- cursor_check_secondaries $pdb $sdbs $quar "Si$tnum.e"
-
- foreach sdb $sdbs {
- error_check_good secondary_close [$sdb close] 0
- }
- error_check_good primary_close [$pdb close] 0
- error_check_good env_close [$env close] 0
-}
diff --git a/storage/bdb/test/si005.tcl b/storage/bdb/test/si005.tcl
deleted file mode 100644
index 292cf1f6092..00000000000
--- a/storage/bdb/test/si005.tcl
+++ /dev/null
@@ -1,135 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: si005.tcl,v 11.11 2004/10/27 20:40:25 carol Exp $
-#
-# TEST si005
-# TEST Basic secondary index put/delete test with transactions
-proc si005 { methods {nentries 200} {tnum "005"} args } {
- source ./include.tcl
- global dict nsecondaries
-
- # Primary method/args.
- set pmethod [lindex $methods 0]
- set pargs [convert_args $pmethod $args]
- set pomethod [convert_method $pmethod]
-
- # Method/args for all the secondaries. If only one method
- # was specified, assume the same method and a standard N
- # secondaries.
- set methods [lrange $methods 1 end]
- if { [llength $methods] == 0 } {
- for { set i 0 } { $i < $nsecondaries } { incr i } {
- lappend methods $pmethod
- }
- }
-
- # Since this is a transaction test, don't allow nentries to be large.
- if { $nentries > 1000 } {
- puts "Skipping si005 for large lists (over 1000 items)."
- return
- }
-
- set argses [convert_argses $methods $args]
- set omethods [convert_methods $methods]
-
- puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
- puts "\twith transactions"
- env_cleanup $testdir
-
- set pname "primary$tnum.db"
- set snamebase "secondary$tnum"
-
- # Open an environment
- # XXX if one is not supplied!
- set env [berkdb_env -create -home $testdir -txn]
- error_check_good env_open [is_valid_env $env] TRUE
-
- # Open the primary.
- set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \
- $pargs $pname]
- error_check_good primary_open [is_valid_db $pdb] TRUE
-
- # Open and associate the secondaries
- set sdbs {}
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -auto_commit -env} $env \
- [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate -auto_commit [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
-
- puts "\tSi$tnum.a: Put loop"
- set did [open $dict]
- for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
- if { [is_record_based $pmethod] == 1 } {
- set key [expr $n + 1]
- set datum $str
- } else {
- set key $str
- gets $did datum
- }
- set keys($n) $key
- set data($n) [pad_data $pmethod $datum]
-
- set txn [$env txn]
- set ret [eval {$pdb put} -txn $txn \
- {$key [chop_data $pmethod $datum]}]
- error_check_good put($n) $ret 0
- error_check_good txn_commit($n) [$txn commit] 0
- }
- close $did
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
-
- puts "\tSi$tnum.b: Put/overwrite loop"
- for { set n 0 } { $n < $nentries } { incr n } {
- set newd $data($n).$keys($n)
-
- set txn [$env txn]
- set ret [eval {$pdb put} -txn $txn \
- {$keys($n) [chop_data $pmethod $newd]}]
- error_check_good put_overwrite($n) $ret 0
- set data($n) [pad_data $pmethod $newd]
- error_check_good txn_commit($n) [$txn commit] 0
- }
- check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b"
-
- # Delete the second half of the entries through the primary.
- # We do the second half so we can just pass keys(0 ... n/2)
- # to check_secondaries.
- set half [expr $nentries / 2]
- puts "\tSi$tnum.c: Primary delete loop: deleting $half entries"
- for { set n $half } { $n < $nentries } { incr n } {
- set txn [$env txn]
- set ret [$pdb del -txn $txn $keys($n)]
- error_check_good pdel($n) $ret 0
- error_check_good txn_commit($n) [$txn commit] 0
- }
- check_secondaries $pdb $sdbs $half keys data "Si$tnum.c"
-
- # Delete half of what's left, through the first secondary.
- set quar [expr $half / 2]
- puts "\tSi$tnum.d: Secondary delete loop: deleting $quar entries"
- set sdb [lindex $sdbs 0]
- set callback [callback_n 0]
- for { set n $quar } { $n < $half } { incr n } {
- set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
- set txn [$env txn]
- set ret [$sdb del -txn $txn $skey]
- error_check_good sdel($n) $ret 0
- error_check_good txn_commit($n) [$txn commit] 0
- }
- check_secondaries $pdb $sdbs $quar keys data "Si$tnum.d"
-
- puts "\tSi$tnum.e: Closing/disassociating primary first"
- error_check_good primary_close [$pdb close] 0
- foreach sdb $sdbs {
- error_check_good secondary_close [$sdb close] 0
- }
- error_check_good env_close [$env close] 0
-}
diff --git a/storage/bdb/test/si006.tcl b/storage/bdb/test/si006.tcl
deleted file mode 100644
index 3a1dbb3c4f8..00000000000
--- a/storage/bdb/test/si006.tcl
+++ /dev/null
@@ -1,129 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2002
-# Sleepycat Software. All rights reserved.
-#
-# $Id: si006.tcl,v 1.2 2002/05/15 17:18:03 sandstro Exp $
-#
-# TEST sindex006
-# TEST Basic secondary index put/delete test with transactions
-proc sindex006 { methods {nentries 200} {tnum 6} args } {
- source ./include.tcl
- global dict nsecondaries
-
- # Primary method/args.
- set pmethod [lindex $methods 0]
- set pargs [convert_args $pmethod $args]
- set pomethod [convert_method $pmethod]
-
- # Method/args for all the secondaries. If only one method
- # was specified, assume the same method and a standard N
- # secondaries.
- set methods [lrange $methods 1 end]
- if { [llength $methods] == 0 } {
- for { set i 0 } { $i < $nsecondaries } { incr i } {
- lappend methods $pmethod
- }
- }
-
- set argses [convert_argses $methods $args]
- set omethods [convert_methods $methods]
-
- puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
- puts " with transactions"
- env_cleanup $testdir
-
- set pname "primary00$tnum.db"
- set snamebase "secondary00$tnum"
-
- # Open an environment
- # XXX if one is not supplied!
- set env [berkdb_env -create -home $testdir -txn]
- error_check_good env_open [is_valid_env $env] TRUE
-
- # Open the primary.
- set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \
- $pargs $pname]
- error_check_good primary_open [is_valid_db $pdb] TRUE
-
- # Open and associate the secondaries
- set sdbs {}
- for { set i 0 } { $i < [llength $omethods] } { incr i } {
- set sdb [eval {berkdb_open -create -auto_commit -env} $env \
- [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
- error_check_good second_open($i) [is_valid_db $sdb] TRUE
-
- error_check_good db_associate($i) \
- [$pdb associate -auto_commit [callback_n $i] $sdb] 0
- lappend sdbs $sdb
- }
-
- puts "\tSindex00$tnum.a: Put loop"
- set did [open $dict]
- for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
- if { [is_record_based $pmethod] == 1 } {
- set key [expr $n + 1]
- set datum $str
- } else {
- set key $str
- gets $did datum
- }
- set keys($n) $key
- set data($n) [pad_data $pmethod $datum]
-
- set txn [$env txn]
- set ret [eval {$pdb put} -txn $txn \
- {$key [chop_data $pmethod $datum]}]
- error_check_good put($n) $ret 0
- error_check_good txn_commit($n) [$txn commit] 0
- }
- close $did
- check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
-
- puts "\tSindex00$tnum.b: Put/overwrite loop"
- for { set n 0 } { $n < $nentries } { incr n } {
- set newd $data($n).$keys($n)
-
- set txn [$env txn]
- set ret [eval {$pdb put} -txn $txn \
- {$keys($n) [chop_data $pmethod $newd]}]
- error_check_good put_overwrite($n) $ret 0
- set data($n) [pad_data $pmethod $newd]
- error_check_good txn_commit($n) [$txn commit] 0
- }
- check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
-
- # Delete the second half of the entries through the primary.
- # We do the second half so we can just pass keys(0 ... n/2)
- # to check_secondaries.
- set half [expr $nentries / 2]
- puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
- for { set n $half } { $n < $nentries } { incr n } {
- set txn [$env txn]
- set ret [$pdb del -txn $txn $keys($n)]
- error_check_good pdel($n) $ret 0
- error_check_good txn_commit($n) [$txn commit] 0
- }
- check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
-
- # Delete half of what's left, through the first secondary.
- set quar [expr $half / 2]
- puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
- set sdb [lindex $sdbs 0]
- set callback [callback_n 0]
- for { set n $quar } { $n < $half } { incr n } {
- set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
- set txn [$env txn]
- set ret [$sdb del -txn $txn $skey]
- error_check_good sdel($n) $ret 0
- error_check_good txn_commit($n) [$txn commit] 0
- }
- check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
-
- puts "\tSindex00$tnum.e: Closing/disassociating primary first"
- error_check_good primary_close [$pdb close] 0
- foreach sdb $sdbs {
- error_check_good secondary_close [$sdb close] 0
- }
- error_check_good env_close [$env close] 0
-}
diff --git a/storage/bdb/test/sindex.tcl b/storage/bdb/test/sindex.tcl
deleted file mode 100644
index fc2a0fc2f31..00000000000
--- a/storage/bdb/test/sindex.tcl
+++ /dev/null
@@ -1,259 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2001-2002
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sindex.tcl,v 1.8 2002/05/07 17:15:46 krinsky Exp $
-#
-# Secondary index test driver and maintenance routines.
-#
-# Breaking from the usual convention, we put the driver function
-# for the secondary index tests here, in its own file. The reason
-# for this is that it's something which compartmentalizes nicely,
-# has little in common with other driver functions, and
-# is likely to be run on its own from time to time.
-#
-# The secondary index tests themselves live in si0*.tcl.
-
-# Standard number of secondary indices to create if a single-element
-# list of methods is passed into the secondary index tests.
-global nsecondaries
-set nsecondaries 2
-
-# Run the secondary index tests.
-proc sindex { {verbose 0} args } {
- global verbose_check_secondaries
- set verbose_check_secondaries $verbose
-
- # Run basic tests with a single secondary index and a small number
- # of keys, then again with a larger number of keys. (Note that
- # we can't go above 5000, since we use two items from our
- # 10K-word list for each key/data pair.)
- foreach n { 200 5000 } {
- foreach pm { btree hash recno frecno queue queueext } {
- foreach sm { dbtree dhash ddbtree ddhash btree hash } {
- sindex001 [list $pm $sm $sm] $n
- sindex002 [list $pm $sm $sm] $n
- # Skip tests 3 & 4 for large lists;
- # they're not that interesting.
- if { $n < 1000 } {
- sindex003 [list $pm $sm $sm] $n
- sindex004 [list $pm $sm $sm] $n
- }
-
- sindex006 [list $pm $sm $sm] $n
- }
- }
- }
-
- # Run secondary index join test. (There's no point in running
-# this with both lengths: the primary is unhappy for now with fixed-
- # length records (XXX), and we need unsorted dups in the secondaries.)
- foreach pm { btree hash recno } {
- foreach sm { btree hash } {
- sindex005 [list $pm $sm $sm] 1000
- }
- sindex005 [list $pm btree hash] 1000
- sindex005 [list $pm hash btree] 1000
- }
-
- # Run test with 50 secondaries.
- foreach pm { btree hash } {
- set methlist [list $pm]
- for { set i 0 } { $i < 50 } { incr i } {
- # XXX this should incorporate hash after #3726
- if { $i % 2 == 0 } {
- lappend methlist "dbtree"
- } else {
- lappend methlist "ddbtree"
- }
- }
- sindex001 $methlist 500
- sindex002 $methlist 500
- sindex003 $methlist 500
- sindex004 $methlist 500
- }
-}
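-# Editorial illustration (not part of the original file): once test.tcl
-# has been sourced, the driver above can be invoked directly, e.g.:
-# % sindex 1 ;# run all secondary index tests with verbose checks on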
-
-# The callback function we use for each given secondary in most tests
-# is a simple function of its place in the list of secondaries (0-based)
-# and the access method (since recnos may need different callbacks).
-#
-# !!!
-# Note that callbacks 0-3 return unique secondary keys if the input data
-# are unique; callbacks 4 and higher may not, so don't use them with
-# the normal wordlist and secondaries that don't support dups.
-# The callbacks that incorporate a key don't work properly with recno
-# access methods, at least not in the current test framework (the
-# error_check_good lines test for e.g. 1foo, when the database has
-# e.g. 0x010x000x000x00foo).
-proc callback_n { n } {
- switch $n {
- 0 { return _s_reversedata }
- 1 { return _s_noop }
- 2 { return _s_concatkeydata }
- 3 { return _s_concatdatakey }
- 4 { return _s_reverseconcat }
- 5 { return _s_truncdata }
- 6 { return _s_alwayscocacola }
- }
- return _s_noop
-}
-
-proc _s_reversedata { a b } { return [reverse $b] }
-proc _s_truncdata { a b } { return [string range $b 1 end] }
-proc _s_concatkeydata { a b } { return $a$b }
-proc _s_concatdatakey { a b } { return $b$a }
-proc _s_reverseconcat { a b } { return [reverse $a$b] }
-proc _s_alwayscocacola { a b } { return "Coca-Cola" }
-proc _s_noop { a b } { return $b }
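-# Editorial sketch (not part of the original file) of deriving a
-# secondary key by hand; "dog"/"cat" are made-up values, and "reverse"
-# is assumed to be the string-reversal helper defined elsewhere in the
-# test suite:
-# set cb [callback_n 0] ;# _s_reversedata
-# set skey [$cb dog cat] ;# skey is now "tac"
-# set cb [callback_n 3] ;# _s_concatdatakey
-# set skey [$cb dog cat] ;# skey is now "catdog"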
-
-# Should the check_secondary routines print lots of output?
-set verbose_check_secondaries 0
-
-# Given a primary database handle, a list of secondary handles, a
-# number of entries, and arrays of keys and data, verify that all
-# databases have what they ought to.
-proc check_secondaries { pdb sdbs nentries keyarr dataarr {pref "Check"} } {
- upvar $keyarr keys
- upvar $dataarr data
- global verbose_check_secondaries
-
- # Make sure each key/data pair is in the primary.
- if { $verbose_check_secondaries } {
- puts "\t\t$pref.1: Each key/data pair is in the primary"
- }
- for { set i 0 } { $i < $nentries } { incr i } {
- error_check_good pdb_get($i) [$pdb get $keys($i)] \
- [list [list $keys($i) $data($i)]]
- }
-
- for { set j 0 } { $j < [llength $sdbs] } { incr j } {
- # Make sure each key/data pair is in this secondary.
- if { $verbose_check_secondaries } {
- puts "\t\t$pref.2:\
- Each skey/key/data tuple is in secondary #$j"
- }
- for { set i 0 } { $i < $nentries } { incr i } {
- set sdb [lindex $sdbs $j]
- set skey [[callback_n $j] $keys($i) $data($i)]
- error_check_good sdb($j)_pget($i) \
- [$sdb pget -get_both $skey $keys($i)] \
- [list [list $skey $keys($i) $data($i)]]
- }
-
- # Make sure this secondary contains only $nentries
- # items.
- if { $verbose_check_secondaries } {
- puts "\t\t$pref.3: Secondary #$j has $nentries items"
- }
- set dbc [$sdb cursor]
- error_check_good dbc($j) \
- [is_valid_cursor $dbc $sdb] TRUE
- for { set k 0 } { [llength [$dbc get -next]] > 0 } \
- { incr k } { }
- error_check_good numitems($j) $k $nentries
- error_check_good dbc($j)_close [$dbc close] 0
- }
-
- if { $verbose_check_secondaries } {
- puts "\t\t$pref.4: Primary has $nentries items"
- }
- set dbc [$pdb cursor]
- error_check_good pdbc [is_valid_cursor $dbc $pdb] TRUE
- for { set k 0 } { [llength [$dbc get -next]] > 0 } { incr k } { }
- error_check_good numitems $k $nentries
- error_check_good pdbc_close [$dbc close] 0
-}
-
-# Given a primary database handle and a list of secondary handles, walk
-# through the primary and make sure all the secondaries are correct,
-# then walk through the secondaries and make sure the primary is correct.
-#
-# This is slightly less rigorous than the normal check_secondaries--we
-# use it whenever we don't have up-to-date "keys" and "data" arrays.
-proc cursor_check_secondaries { pdb sdbs nentries { pref "Check" } } {
- global verbose_check_secondaries
-
- # Make sure each key/data pair in the primary is in each secondary.
- set pdbc [$pdb cursor]
- error_check_good ccs_pdbc [is_valid_cursor $pdbc $pdb] TRUE
- set i 0
- if { $verbose_check_secondaries } {
- puts "\t\t$pref.1:\
- Key/data in primary => key/data in secondaries"
- }
-
- for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
- { set dbt [$pdbc get -next] } {
- incr i
- set pkey [lindex [lindex $dbt 0] 0]
- set pdata [lindex [lindex $dbt 0] 1]
- for { set j 0 } { $j < [llength $sdbs] } { incr j } {
- set sdb [lindex $sdbs $j]
- set sdbt [$sdb pget -get_both \
- [[callback_n $j] $pkey $pdata] $pkey]
- error_check_good pkey($pkey,$j) \
- [lindex [lindex $sdbt 0] 1] $pkey
- error_check_good pdata($pdata,$j) \
- [lindex [lindex $sdbt 0] 2] $pdata
- }
- }
- error_check_good ccs_pdbc_close [$pdbc close] 0
- error_check_good primary_has_nentries $i $nentries
-
- for { set j 0 } { $j < [llength $sdbs] } { incr j } {
- if { $verbose_check_secondaries } {
- puts "\t\t$pref.2:\
- Key/data in secondary #$j => key/data in primary"
- }
- set sdb [lindex $sdbs $j]
- set sdbc [$sdb cursor]
- error_check_good ccs_sdbc($j) [is_valid_cursor $sdbc $sdb] TRUE
- set i 0
- for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
- { set dbt [$sdbc pget -next] } {
- incr i
- set pkey [lindex [lindex $dbt 0] 1]
- set pdata [lindex [lindex $dbt 0] 2]
- error_check_good pdb_get($pkey/$pdata,$j) \
- [$pdb get -get_both $pkey $pdata] \
- [list [list $pkey $pdata]]
- }
- error_check_good secondary($j)_has_nentries $i $nentries
-
- # To exercise pget -last/pget -prev, we do it backwards too.
- set i 0
- for { set dbt [$sdbc pget -last] } { [llength $dbt] > 0 } \
- { set dbt [$sdbc pget -prev] } {
- incr i
- set pkey [lindex [lindex $dbt 0] 1]
- set pdata [lindex [lindex $dbt 0] 2]
- error_check_good pdb_get_bkwds($pkey/$pdata,$j) \
- [$pdb get -get_both $pkey $pdata] \
- [list [list $pkey $pdata]]
- }
- error_check_good secondary($j)_has_nentries_bkwds $i $nentries
-
- error_check_good ccs_sdbc_close($j) [$sdbc close] 0
- }
-}
-
-# The secondary index tests take a list of the access methods that
-# each array ought to use. Convert at one blow into a list of converted
-# argses and omethods for each method in the list.
-proc convert_argses { methods largs } {
- set ret {}
- foreach m $methods {
- lappend ret [convert_args $m $largs]
- }
- return $ret
-}
-proc convert_methods { methods } {
- set ret {}
- foreach m $methods {
- lappend ret [convert_method $m]
- }
- return $ret
-}
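-# Editorial note (not part of the original file): for a methods list
-# such as {btree hash}, convert_methods yields the matching berkdb_open
-# flags (e.g. {-btree -hash}) and convert_argses yields one converted
-# argument list per method, so the si0* tests can index both lists by
-# secondary number.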
diff --git a/storage/bdb/test/sysscript.tcl b/storage/bdb/test/sysscript.tcl
deleted file mode 100644
index 8386949a6ff..00000000000
--- a/storage/bdb/test/sysscript.tcl
+++ /dev/null
@@ -1,282 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: sysscript.tcl,v 11.19 2004/01/28 03:36:30 bostic Exp $
-#
-# System integration test script.
-# This script runs a single process that tests the full functionality of
-# the system. The database under test contains nfiles files. Each process
-# randomly generates a key and some data. Both keys and data are bimodally
-# distributed between small keys (1-10 characters) and large keys (the avg
-# length is indicated via the command-line parameter).
-# The process then decides on a replication factor between 1 and nfiles.
-# It writes the key and data to that many files and tacks on the file ids
-# of the files it writes to the data string. For example, let's say that
-# I randomly generate the key dog and data cat. Then I pick a replication
-# factor of 3. I pick 3 files from the set of n (say 1, 3, and 5). I then
-# rewrite the data as 1:3:5:cat. I begin a transaction, add the key/data
-# pair to each file and then commit. Notice that I may generate replication
-# of the form 1:3:3:cat in which case I simply add a duplicate to file 3.
-#
-# Usage: sysscript dir nfiles key_avg data_avg method
-#
-# dir: DB_HOME directory
-# nfiles: number of files in the set
-# key_avg: average big key size
-# data_avg: average big data size
-# method: access method to use for the database files
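-#
-# Editorial sketch (not part of the original file) of the data-string
-# construction described above, using made-up values: key "dog", data
-# "cat", replication factor 3, file ids {1 3 5}:
-# set data cat
-# foreach f {1 3 5} { set data $f:$data }
-# # data is now "5:3:1:cat"; the order of the id prefixes is
-# # immaterial, only the ids and the trailing datum matter.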
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-set mypid [pid]
-
-set usage "sysscript dir nfiles key_avg data_avg method"
-
-# Verify usage
-if { $argc != 5 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-puts [concat "Argc: " $argc " Argv: " $argv]
-
-# Initialize arguments
-set dir [lindex $argv 0]
-set nfiles [ lindex $argv 1 ]
-set key_avg [ lindex $argv 2 ]
-set data_avg [ lindex $argv 3 ]
-set method [ lindex $argv 4 ]
-
-# Initialize seed
-global rand_init
-berkdb srand $rand_init
-
-puts "Beginning execution for $mypid"
-puts "$dir DB_HOME"
-puts "$nfiles files"
-puts "$key_avg average key length"
-puts "$data_avg average data length"
-
-flush stdout
-
-# Create local environment
-set dbenv [berkdb_env -txn -home $dir]
-set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret]
-if {$err != 0} {
- puts $ret
- return
-}
-
-# Now open the files
-for { set i 0 } { $i < $nfiles } { incr i } {
- set file test044.$i.db
- set db($i) [berkdb open -auto_commit -env $dbenv $method $file]
- set err [catch {error_check_bad $mypid:dbopen $db($i) NULL} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- set err [catch {error_check_bad $mypid:dbopen [is_substr $db($i) \
- error] 1} ret]
- if {$err != 0} {
- puts $ret
- return
- }
-}
-
-set record_based [is_record_based $method]
-while { 1 } {
- # Decide if we're going to create a big key or a small key
- # We give small keys a 70% chance.
- if { [berkdb random_int 1 10] < 8 } {
- set k [random_data 5 0 0 $record_based]
- } else {
- set k [random_data $key_avg 0 0 $record_based]
- }
- set data [chop_data $method [random_data $data_avg 0 0]]
-
- set txn [$dbenv txn]
- set err [catch {error_check_good $mypid:txn_begin [is_substr $txn \
- $dbenv.txn] 1} ret]
- if {$err != 0} {
- puts $ret
- return
- }
-
- # Open cursors
- for { set f 0 } {$f < $nfiles} {incr f} {
- set cursors($f) [$db($f) cursor -txn $txn]
- set err [catch {error_check_good $mypid:cursor_open \
- [is_substr $cursors($f) $db($f)] 1} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- }
- set aborted 0
-
- # Check to see if key is already in database
- set found 0
- for { set i 0 } { $i < $nfiles } { incr i } {
- set r [$db($i) get -txn $txn $k]
- if { $r == "-1" } {
- for {set f 0 } {$f < $nfiles} {incr f} {
- set err [catch {error_check_good \
- $mypid:cursor_close \
- [$cursors($f) close] 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- }
- set err [catch {error_check_good $mypid:txn_abort \
- [$txn abort] 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- set aborted 1
- set found 2
- break
- } elseif { $r != "Key $k not found." } {
- set found 1
- break
- }
- }
- switch $found {
- 2 {
- # Transaction aborted, no need to do anything.
- }
- 0 {
- # Key was not found, decide how much to replicate
- # and then create a list of that many file IDs.
- set repl [berkdb random_int 1 $nfiles]
- set fset ""
- for { set i 0 } { $i < $repl } {incr i} {
- set f [berkdb random_int 0 [expr $nfiles - 1]]
- lappend fset $f
- set data [chop_data $method $f:$data]
- }
-
- foreach i $fset {
- set r [$db($i) put -txn $txn $k $data]
- if {$r == "-1"} {
- for {set f 0 } {$f < $nfiles} {incr f} {
- set err [catch {error_check_good \
- $mypid:cursor_close \
- [$cursors($f) close] 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- }
- set err [catch {error_check_good \
- $mypid:txn_abort [$txn abort] 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- set aborted 1
- break
- }
- }
- }
- 1 {
- # Key was found. Make sure that all the data values
- # look good.
- set f [zero_list $nfiles]
- set data $r
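- # The stored datum has the form "f1:f2:...:data"; peel one
- # "fnum:" prefix per iteration and verify the key/data pair in
- # the file it names through that file's cursor (-set the first
- # time we visit a file, -next thereafter to pick up duplicates).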
- while { [set ndx [string first : $r]] != -1 } {
- set fnum [string range $r 0 [expr $ndx - 1]]
- if { [lindex $f $fnum] == 0 } {
- set flag -set
- set full [record $cursors($fnum) get -set $k]
- } else {
- set flag -next
- set full [record $cursors($fnum) get -next]
- }
- if {[llength $full] == 0} {
- for {set f 0 } {$f < $nfiles} {incr f} {
- set err [catch {error_check_good \
- $mypid:cursor_close \
- [$cursors($f) close] 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- }
- set err [catch {error_check_good \
- $mypid:txn_abort [$txn abort] 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- set aborted 1
- break
- }
- set err [catch {error_check_bad \
- $mypid:curs_get($k,$data,$fnum,$flag) \
- [string length $full] 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- set key [lindex [lindex $full 0] 0]
- set rec [pad_data $method [lindex [lindex $full 0] 1]]
- set err [catch {error_check_good \
- $mypid:dbget_$fnum:key $key $k} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- set err [catch {error_check_good \
- $mypid:dbget_$fnum:data($k) $rec $data} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- set f [lreplace $f $fnum $fnum 1]
- incr ndx
- set r [string range $r $ndx end]
- }
- }
- }
- if { $aborted == 0 } {
- for {set f 0 } {$f < $nfiles} {incr f} {
- set err [catch {error_check_good $mypid:cursor_close \
- [$cursors($f) close] 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- }
- set err [catch {error_check_good $mypid:commit [$txn commit] \
- 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
- }
-}
-
-# Close files
-for { set i 0 } { $i < $nfiles} { incr i } {
- set r [$db($i) close]
- set err [catch {error_check_good $mypid:db_close:$i $r 0} ret]
- if {$err != 0} {
- puts $ret
- return
- }
-}
-
-# Close the environment (this also closes the transaction manager)
-$dbenv close
-
-puts "[timestamp] [pid] Complete"
-flush stdout
-
-filecheck $file 0
diff --git a/storage/bdb/test/test.tcl b/storage/bdb/test/test.tcl
deleted file mode 100644
index 3bd3e4d9c40..00000000000
--- a/storage/bdb/test/test.tcl
+++ /dev/null
@@ -1,1941 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test.tcl,v 11.273 2004/11/01 14:48:23 carol Exp $
-
-source ./include.tcl
-
-# Load DB's TCL API.
-load $tcllib
-
-if { [file exists $testdir] != 1 } {
- file mkdir $testdir
-}
-
-global __debug_print
-global __debug_on
-global __debug_test
-
-#
-# Test if utilities work to figure out the path. Most systems
-# use ., but QNX has a problem with execvp of shell scripts which
-# causes it to break.
-#
-set stat [catch {exec ./db_printlog -?} ret]
-if { [string first "exec format error" $ret] != -1 } {
- set util_path ./.libs
-} else {
- set util_path .
-}
-set __debug_print 0
-set encrypt 0
-set old_encrypt 0
-set passwd test_passwd
-
-# Error stream that (should!) always go to the console, even if we're
-# redirecting to ALL.OUT.
-set consoleerr stderr
-
-set dict $test_path/wordlist
-set alphabet "abcdefghijklmnopqrstuvwxyz"
-set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
-
-# Random number seed.
-global rand_init
-set rand_init 12082003
-
-# Default record length for fixed record length access method(s)
-set fixed_len 20
-
-set recd_debug 0
-set log_log_record_types 0
-set ohandles {}
-
-# Normally, we're not running an all-tests-in-one-env run. This matters
-# for error stream/error prefix settings in berkdb_open.
-global is_envmethod
-set is_envmethod 0
-
-# For testing locker id wrap around.
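-# (Locker IDs occupy the lower half of the 32-bit ID space, 0 through
-# 0x7fffffff; transaction IDs the upper half, 0x80000000 through
-# 0xffffffff. The curid/maxid values below encode those bounds.)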
-global lock_curid
-global lock_maxid
-set lock_curid 0
-set lock_maxid 2147483647
-global txn_curid
-global txn_maxid
-set txn_curid 2147483648
-set txn_maxid 4294967295
-
-# The variable one_test allows us to run all the permutations
-# of a test with run_all or run_std.
-global one_test
-if { [info exists one_test] != 1 } {
- set one_test "ALL"
-}
-
-# This is where the test numbering and parameters now live.
-source $test_path/testparams.tcl
-
-# Set up any OS-specific values
-global tcl_platform
-set is_windows_test [is_substr $tcl_platform(os) "Win"]
-set is_hp_test [is_substr $tcl_platform(os) "HP-UX"]
-set is_je_test 0
-set is_qnx_test [is_substr $tcl_platform(os) "QNX"]
-set upgrade_be [big_endian]
-
-global EXE BAT
-if { $is_windows_test == 1 } {
- set EXE ".exe"
- set BAT ".bat"
-} else {
- set EXE ""
- set BAT ""
-}
-
-# Try to open an encrypted database. If it fails, this release
-# doesn't support encryption, and encryption tests should be skipped.
-set has_crypto 1
-set stat [catch {set db \
- [eval {berkdb open -create -btree -encryptaes test_passwd} ] } result ]
-if { $stat != 0 } {
- # Make sure it's the right error for a non-crypto release.
- error_check_good non_crypto_release \
- [expr [is_substr $result "operation not supported"] || \
- [is_substr $result "invalid argument"]] 1
- set has_crypto 0
-} else {
- # It is a crypto release. Get rid of the db, we don't need it.
- error_check_good close_encrypted_db [$db close] 0
-}
-
-# From here on out, test.tcl contains the procs that are used to
-# run all or part of the test suite.
-
-proc run_std { { testname ALL } args } {
- global test_names
- global one_test
- source ./include.tcl
-
- set one_test $testname
- if { $one_test != "ALL" } {
- # Source testparams again to adjust test_names.
- source $test_path/testparams.tcl
- }
-
- set exflgs [eval extractflags $args]
- set args [lindex $exflgs 0]
- set flags [lindex $exflgs 1]
-
- set display 1
- set run 1
- set am_only 0
- set no_am 0
- set std_only 1
- set rflags {--}
- foreach f $flags {
- switch $f {
- A {
- set std_only 0
- }
- M {
- set no_am 1
- puts "run_std: all but access method tests."
- }
- m {
- set am_only 1
- puts "run_std: access method tests only."
- }
- n {
- set display 1
- set run 0
- set rflags [linsert $rflags 0 "-n"]
- }
- }
- }
-
- if { $std_only == 1 } {
- fileremove -f ALL.OUT
-
- set o [open ALL.OUT a]
- if { $run == 1 } {
- puts -nonewline "Test suite run started at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
- puts [berkdb version -string]
-
- puts -nonewline $o "Test suite run started at: "
- puts $o [clock format [clock seconds] -format "%H:%M %D"]
- puts $o [berkdb version -string]
- }
- close $o
- }
-
- set test_list {
- {"environment" "env"}
- {"archive" "archive"}
- {"file operations" "fop"}
- {"locking" "lock"}
- {"logging" "log"}
- {"memory pool" "memp"}
- {"mutex" "mutex"}
- {"transaction" "txn"}
- {"deadlock detection" "dead"}
- {"subdatabase" "sdb"}
- {"byte-order" "byte"}
- {"recno backing file" "rsrc"}
- {"DBM interface" "dbm"}
- {"NDBM interface" "ndbm"}
- {"Hsearch interface" "hsearch"}
- {"secondary index" "sindex"}
- }
-
- if { $am_only == 0 } {
-
- foreach pair $test_list {
- set msg [lindex $pair 0]
- set cmd [lindex $pair 1]
- puts "Running $msg tests"
- if [catch {exec $tclsh_path << \
- "global one_test; set one_test $one_test; \
- source $test_path/test.tcl; r $rflags $cmd" \
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: $cmd test: $res"
- close $o
- }
- }
-
- # Run recovery tests.
- #
- # XXX These too are broken into separate tclsh instantiations
- # so we don't require so much memory, but I think it's cleaner
- # and more useful to do it down inside proc r than here,
- # since "r recd" gets done a lot and needs to work.
- #
- # Note that we still wrap the test in an exec so that
- # its output goes to ALL.OUT. run_recd will wrap each test
- # so that both error streams go to stdout (which here goes
- # to ALL.OUT); information that run_recd wishes to print
- # to the "real" stderr, but outside the wrapping for each test,
- # such as which tests are being skipped, it can still send to
- # stderr.
- puts "Running recovery tests"
- if [catch {
- exec $tclsh_path << \
- "global one_test; set one_test $one_test; \
- source $test_path/test.tcl; r $rflags recd" \
- 2>@ stderr >> ALL.OUT
- } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: recd tests: $res"
- close $o
- }
-
- # Run join test
- #
- # XXX
- # Broken up into separate tclsh instantiations so we don't
- # require so much memory.
- if { $one_test == "ALL" } {
- puts "Running join test"
- foreach test "join1 join2 join3 join4 join5 join6" {
- if [catch {exec $tclsh_path << \
- "source $test_path/test.tcl; r $rflags $test" \
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: $test test: $res"
- close $o
- }
- }
- }
- }
-
- if { $no_am == 0 } {
- # Access method tests.
- #
- # XXX
- # Broken up into separate tclsh instantiations so we don't
- # require so much memory.
- foreach method \
- "btree hash queue queueext recno rbtree frecno rrecno" {
- puts "Running $method tests"
- foreach test $test_names(test) {
- if { $run == 0 } {
- set o [open ALL.OUT a]
- run_method \
- -$method $test $display $run $o
- close $o
- }
- if { $run } {
- if [catch {exec $tclsh_path << \
- "global one_test; \
- set one_test $one_test; \
- source $test_path/test.tcl; \
- run_method \
- -$method $test $display $run"\
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL:$test $method: $res"
- close $o
- }
- }
- }
- }
- }
-
- # If not actually running, no need to check for failure.
- # If running in the context of the larger 'run_all' we don't
- # check for failure here either.
- if { $run == 0 || $std_only == 0 } {
- return
- }
-
- set failed [check_output ALL.OUT]
-
- set o [open ALL.OUT a]
- if { $failed == 0 } {
- puts "Regression Tests Succeeded"
- puts $o "Regression Tests Succeeded"
- } else {
- puts "Regression Tests Failed"
- puts "Check UNEXPECTED OUTPUT lines."
- puts "Review ALL.OUT.x for details."
- puts $o "Regression Tests Failed"
- }
-
- puts -nonewline "Test suite run completed at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
- puts -nonewline $o "Test suite run completed at: "
- puts $o [clock format [clock seconds] -format "%H:%M %D"]
- close $o
-}
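# A sketch of typical invocations (hypothetical; the flag letters come
# from the switch at the top of this proc, and the exact argument syntax
# depends on extractflags, defined elsewhere in this file):
#
#	run_std			full standard run, logged to ALL.OUT
#	run_std -m		access method tests only
#	run_std -M		all but the access method tests
#	run_std -n		display the commands without running them
#	run_std -A		run as part of run_all: skip the ALL.OUT
#				header and the final pass/fail check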
-
-proc check_output { file } {
- # These are all the acceptable patterns.
- set pattern {(?x)
-		^[[:space:]]*$|
- .*?wrap\.tcl.*|
- .*?dbscript\.tcl.*|
- .*?ddscript\.tcl.*|
- .*?mpoolscript\.tcl.*|
- .*?mutexscript\.tcl.*|
- ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)$|
- ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\sCrashing$|
- ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s[p|P]rocesses\srunning:.*|
- ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s5\sprocesses\srunning.*|
- ^\d:\sPut\s\d*\sstrings\srandom\soffsets.*|
- ^100.*|
- ^eval\s.*|
- ^exec\s.*|
- ^jointest.*$|
- ^r\sarchive\s*|
- ^r\sdbm\s*|
- ^r\shsearch\s*|
- ^r\sndbm\s*|
- ^r\srpc\s*|
- ^run_recd:\s.*|
- ^run_reptest:\s.*|
- ^run_rpcmethod:\s.*|
- ^run_secenv:\s.*|
- ^All\sprocesses\shave\sexited.$|
- ^Beginning\scycle\s\d$|
- ^Byteorder:.*|
- ^Child\sruns\scomplete\.\s\sParent\smodifies\sdata\.$|
- ^Deadlock\sdetector:\s\d*\sCheckpoint\sdaemon\s\d*$|
- ^Ending\srecord.*|
- ^Environment\s.*?specified;\s\sskipping\.$|
- ^Executing\srecord\s.*|
- ^Join\stest:\.*|
- ^Method:\s.*|
- ^Repl:\stest\d\d\d:.*|
- ^Repl:\ssdb\d\d\d:.*|
- ^Script\swatcher\sprocess\s.*|
- ^Sleepycat\sSoftware:\sBerkeley\sDB\s.*|
- ^Test\ssuite\srun\s.*|
- ^Unlinking\slog:\serror\smessage\sOK$|
- ^Verifying\s.*|
- ^\t*\.\.\.dbc->get.*$|
- ^\t*\.\.\.dbc->put.*$|
- ^\t*\.\.\.key\s\d*$|
- ^\t*\.\.\.Skipping\sdbc.*|
- ^\t*and\s\d*\sduplicate\sduplicates\.$|
- ^\t*About\sto\srun\srecovery\s.*complete$|
- ^\t*Archive[:\.].*|
- ^\t*Building\s.*|
- ^\t*closing\ssecondaries\.$|
- ^\t*Command\sexecuted\sand\s.*$|
- ^\t*DBM.*|
- ^\t*[d|D]ead[0-9][0-9][0-9].*|
- ^\t*Dump\/load\sof.*|
- ^\t*[e|E]nv[0-9][0-9][0-9].*|
- ^\t*Executing\scommand$|
- ^\t*Executing\stxn_.*|
- ^\t*File\srecd005\.\d\.db\sexecuted\sand\saborted\.$|
- ^\t*File\srecd005\.\d\.db\sexecuted\sand\scommitted\.$|
- ^\t*[f|F]op[0-9][0-9][0-9].*|
- ^\t*HSEARCH.*|
- ^\t*Initial\sCheckpoint$|
- ^\t*Iteration\s\d*:\sCheckpointing\.$|
- ^\t*Joining:\s.*|
- ^\t*Kid[1|2]\sabort\.\.\.complete$|
- ^\t*Kid[1|2]\scommit\.\.\.complete$|
- ^\t*[l|L]ock[0-9][0-9][0-9].*|
- ^\t*[l|L]og[0-9][0-9][0-9].*|
- ^\t*[m|M]emp[0-9][0-9][0-9].*|
- ^\t*[m|M]utex[0-9][0-9][0-9].*|
- ^\t*NDBM.*|
- ^\t*opening\ssecondaries\.$|
- ^\t*op_recover_rec:\sRunning\srecovery.*|
- ^\t*[r|R]ecd[0-9][0-9][0-9].*|
- ^\t*[r|R]ep[0-9][0-9][0-9].*|
- ^\t*[r|R]ep_test.*|
- ^\t*[r|R]pc[0-9][0-9][0-9].*|
- ^\t*[r|R]src[0-9][0-9][0-9].*|
- ^\t*Run_rpcmethod.*|
- ^\t*Running\srecovery\son\s.*|
- ^\t*[s|S]ec[0-9][0-9][0-9].*|
- ^\t*[s|S]i[0-9][0-9][0-9].*|
- ^\t*Sijoin.*|
- ^\t*sdb[0-9][0-9][0-9].*|
- ^\t*Skipping\s.*|
- ^\t*Subdb[0-9][0-9][0-9].*|
- ^\t*Subdbtest[0-9][0-9][0-9].*|
- ^\t*Syncing$|
- ^\t*[t|T]est[0-9][0-9][0-9].*|
- ^\t*[t|T]xn[0-9][0-9][0-9].*|
- ^\t*Txnscript.*|
- ^\t*Using\s.*?\senvironment\.$|
- ^\t*Verification\sof.*|
- ^\t*with\stransactions$}
-
- set failed 0
- set f [open $file r]
- while { [gets $f line] >= 0 } {
- if { [regexp $pattern $line] == 0 } {
- puts -nonewline "UNEXPECTED OUTPUT: "
- puts $line
- set failed 1
- }
- }
- close $f
- return $failed
-}
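# The (?x) prefix above enables Tcl's expanded regular-expression
# syntax, in which whitespace and newlines inside the pattern are
# ignored; that is what allows the acceptable patterns to be listed one
# alternative per line. A minimal, self-contained illustration with
# made-up strings:
set pat {(?x)
    ^ok$|
    ^\d+\spassed$}
puts [regexp $pat "42 passed"]	;# prints 1
puts [regexp $pat "oops"]	;# prints 0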
-
-proc r { args } {
- global test_names
- global has_crypto
- global rand_init
- global one_test
-
- source ./include.tcl
-
- set exflgs [eval extractflags $args]
- set args [lindex $exflgs 0]
- set flags [lindex $exflgs 1]
-
- set display 1
- set run 1
- set saveflags "--"
- foreach f $flags {
- switch $f {
- n {
- set display 1
- set run 0
- set saveflags "-n $saveflags"
- }
- }
- }
-
- if {[catch {
- set sub [ lindex $args 0 ]
- switch $sub {
- dead -
- env -
- lock -
- log -
- memp -
- mutex -
- rsrc -
- sdbtest -
- txn {
- if { $display } {
- run_subsystem $sub 1 0
- }
- if { $run } {
- run_subsystem $sub
- }
- }
- byte {
- if { $one_test == "ALL" } {
- run_test byteorder $display $run
- }
- }
- archive -
- dbm -
- hsearch -
- ndbm -
- shelltest {
- if { $one_test == "ALL" } {
- if { $display } { puts "r $sub" }
- if { $run } {
- check_handles
- $sub
- }
- }
- }
- bigfile -
- elect -
- fop {
- foreach test $test_names($sub) {
- eval run_test $test $display $run
- }
- }
- join {
- eval r $saveflags join1
- eval r $saveflags join2
- eval r $saveflags join3
- eval r $saveflags join4
- eval r $saveflags join5
- eval r $saveflags join6
- }
- join1 {
- if { $display } { puts jointest }
- if { $run } {
- check_handles
- jointest
- }
- }
- joinbench {
- puts "[timestamp]"
- eval r $saveflags join1
- eval r $saveflags join2
- puts "[timestamp]"
- }
- join2 {
- if { $display } { puts "jointest 512" }
- if { $run } {
- check_handles
- jointest 512
- }
- }
- join3 {
- if { $display } {
- puts "jointest 8192 0 -join_item"
- }
- if { $run } {
- check_handles
- jointest 8192 0 -join_item
- }
- }
- join4 {
- if { $display } { puts "jointest 8192 2" }
- if { $run } {
- check_handles
- jointest 8192 2
- }
- }
- join5 {
- if { $display } { puts "jointest 8192 3" }
- if { $run } {
- check_handles
- jointest 8192 3
- }
- }
- join6 {
- if { $display } { puts "jointest 512 3" }
- if { $run } {
- check_handles
- jointest 512 3
- }
- }
- recd {
- check_handles
- run_recds $run $display [lrange $args 1 end]
- }
- rep {
- foreach test $test_names(rep) {
- run_test $test $display $run
- }
- # We seed the random number generator here
- # instead of in run_repmethod so that we
- # aren't always reusing the first few
- # responses from random_int.
- #
- berkdb srand $rand_init
- foreach sub { test sdb } {
- foreach test $test_names($sub) {
- eval run_test run_repmethod \
- $display $run $test
- }
- }
- }
- rpc {
- if { $one_test == "ALL" } {
- if { $display } { puts "r $sub" }
-			global BAT EXE rpc_svc svc_list is_je_test
- set old_rpc_src $rpc_svc
- foreach rpc_svc $svc_list {
- if { $rpc_svc == "berkeley_dbje_svc" } {
- set old_util_path $util_path
- set util_path $je_root/dist
- set is_je_test 1
- }
-
- if { !$run || \
- ![file exist $util_path/$rpc_svc$BAT] || \
- ![file exist $util_path/$rpc_svc$EXE] } {
- continue
- }
-
- run_subsystem rpc
- if { [catch {run_rpcmethod -txn} ret] != 0 } {
- puts $ret
- }
-
- if { $is_je_test } {
- check_handles
- eval run_rpcmethod -btree
- verify_dir $testdir "" 1
- } else {
- run_test run_rpcmethod $display $run
- }
-
- if { $is_je_test } {
- set util_path $old_util_path
- set is_je_test 0
- }
-
- }
- set rpc_svc $old_rpc_src
- }
- }
- sec {
- # Skip secure mode tests if release
- # does not support encryption.
- if { $has_crypto == 0 } {
- return
- }
- if { $display } {
- run_subsystem $sub 1 0
- }
- if { $run } {
- run_subsystem $sub 0 1
- }
- foreach test $test_names(test) {
- eval run_test run_secmethod \
- $display $run $test
- eval run_test run_secenv \
- $display $run $test
- }
- }
- sdb {
- if { $one_test == "ALL" } {
- if { $display } {
- run_subsystem sdbtest 1 0
- }
- if { $run } {
- run_subsystem sdbtest 0 1
- }
- }
- foreach test $test_names(sdb) {
- eval run_test $test $display $run
- }
- }
- sindex {
- if { $one_test == "ALL" } {
- if { $display } {
- sindex 1 0
- sijoin 1 0
- }
- if { $run } {
- sindex 0 1
- sijoin 0 1
- }
- }
- }
- btree -
- rbtree -
- hash -
- iqueue -
- iqueueext -
- queue -
- queueext -
- recno -
- frecno -
- rrecno {
- foreach test $test_names(test) {
- eval run_method [lindex $args 0] $test \
- $display $run [lrange $args 1 end]
- }
- }
-
- default {
- error \
- "FAIL:[timestamp] r: $args: unknown command"
- }
- }
- flush stdout
- flush stderr
- } res] != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
- error "FAIL:[timestamp] r: $args: $theError"
- } else {
- error $theError;
- }
- }
-}
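# A sketch of how proc r is typically driven (hypothetical invocations;
# the subcommand names are those of the switch above):
#
#	r btree			all test_names(test) tests, btree method
#	r -n recd		display the recovery tests without running
#	r join			join1 through join6 in sequence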
-
-proc run_subsystem { sub { display 0 } { run 1} } {
- global test_names
-
- if { [info exists test_names($sub)] != 1 } {
- puts stderr "Subsystem $sub has no tests specified in\
- testparams.tcl; skipping."
- return
- }
- foreach test $test_names($sub) {
- if { $display } {
- puts "eval $test"
- }
- if { $run } {
- check_handles
- if {[catch {eval $test} ret] != 0 } {
- puts "FAIL: run_subsystem: $sub $test: \
- $ret"
- }
- }
- }
-}
-
-proc run_test { test {display 0} {run 1} args } {
- source ./include.tcl
- foreach method "hash queue queueext recno rbtree frecno rrecno btree" {
- if { $display } {
- puts "eval $test -$method $args; verify_dir $testdir \"\" 1"
- }
- if { $run } {
- check_handles
- eval $test -$method $args
- verify_dir $testdir "" 1
- }
- }
-}
-
-proc run_method { method test {display 0} {run 1} \
- { outfile stdout } args } {
- global __debug_on
- global __debug_print
- global __debug_test
- global test_names
- global parms
- source ./include.tcl
-
- if {[catch {
- if { $display } {
- puts -nonewline $outfile "eval $test $method"
- puts -nonewline $outfile " $parms($test) $args"
- puts $outfile " ; verify_dir $testdir \"\" 1"
- }
- if { $run } {
- check_handles $outfile
- puts $outfile "[timestamp]"
- eval $test $method $parms($test) $args
- if { $__debug_print != 0 } {
- puts $outfile ""
- }
- # verify all databases the test leaves behind
- verify_dir $testdir "" 1
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- }
- flush stdout
- flush stderr
- } res] != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
- error "FAIL:[timestamp]\
- run_method: $method $test: $theError"
- } else {
- error $theError;
- }
- }
-}
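# The error handler above keeps only the first line of errorInfo; the
# rest is the Tcl stack trace. The trimming idiom in isolation, with a
# made-up error message:
if { [catch { error "FAIL: demo error" } res] != 0 } {
	set fnl [string first "\n" $errorInfo]
	puts [string range $errorInfo 0 [expr $fnl - 1]]
	# prints "FAIL: demo error" without the stack trace
}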
-
-proc run_rpcmethod { method {largs ""} } {
- global __debug_on
- global __debug_print
- global __debug_test
- global rpc_tests
- global parms
- global is_envmethod
- global rpc_svc
- source ./include.tcl
-
- puts "run_rpcmethod: $method $largs"
-
- set save_largs $largs
- set dpid [rpc_server_start]
- puts "\tRun_rpcmethod.a: started server, pid $dpid"
- remote_cleanup $rpc_server $rpc_testdir $testdir
-
- set home [file tail $rpc_testdir]
-
- set is_envmethod 1
- set use_txn 0
- if { [string first "txn" $method] != -1 } {
- set use_txn 1
- }
- if { $use_txn == 1 } {
- set ntxns 32
- set i 1
- check_handles
- remote_cleanup $rpc_server $rpc_testdir $testdir
- set env [eval {berkdb_env -create -mode 0644 -home $home \
- -server $rpc_server -client_timeout 10000} -txn]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set stat [catch {eval txn001_suba $ntxns $env} res]
- if { $stat == 0 } {
- set stat [catch {eval txn001_subb $ntxns $env} res]
- }
- set stat [catch {eval txn003} res]
- error_check_good envclose [$env close] 0
- } else {
- foreach test $rpc_tests($rpc_svc) {
- set stat [catch {
- check_handles
- remote_cleanup $rpc_server $rpc_testdir $testdir
- #
- # Set server cachesize to 128Mb. Otherwise
- # some tests won't fit (like test084 -btree).
- #
- set env [eval {berkdb_env -create -mode 0644 \
- -home $home -server $rpc_server \
- -client_timeout 10000 \
- -cachesize {0 134217728 1}}]
- error_check_good env_open \
- [is_valid_env $env] TRUE
- set largs $save_largs
- append largs " -env $env "
-
- puts "[timestamp]"
- eval $test $method $parms($test) $largs
- if { $__debug_print != 0 } {
- puts ""
- }
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- flush stdout
- flush stderr
- error_check_good envclose [$env close] 0
- set env ""
- } res]
-
- if { $stat != 0} {
- global errorInfo;
-
- puts "$res"
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
- puts "FAIL:[timestamp]\
- run_rpcmethod: $method $test: $errorInfo"
- } else {
- puts $theError;
- }
-
- catch { $env close } ignore
- set env ""
- tclkill $dpid
- set dpid [rpc_server_start]
- }
- }
- }
- set is_envmethod 0
- tclkill $dpid
-}
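# The -cachesize argument above uses the standard berkdb_env triple
# {gbytes bytes ncaches}; {0 134217728 1} is a single 128MB cache. A
# hedged stand-alone equivalent (assuming $testdir is set, as it is
# throughout this file):
#
#	set env [berkdb_env -create -home $testdir \
#	    -cachesize {0 134217728 1}]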
-
-proc run_rpcnoserver { method {largs ""} } {
- global __debug_on
- global __debug_print
- global __debug_test
- global test_names
- global parms
- global is_envmethod
- source ./include.tcl
-
- puts "run_rpcnoserver: $method $largs"
-
- set save_largs $largs
- remote_cleanup $rpc_server $rpc_testdir $testdir
- set home [file tail $rpc_testdir]
-
- set is_envmethod 1
- set use_txn 0
- if { [string first "txn" $method] != -1 } {
- set use_txn 1
- }
- if { $use_txn == 1 } {
- set ntxns 32
- set i 1
- check_handles
- remote_cleanup $rpc_server $rpc_testdir $testdir
- set env [eval {berkdb_env -create -mode 0644 -home $home \
- -server $rpc_server -client_timeout 10000} -txn]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set stat [catch {eval txn001_suba $ntxns $env} res]
- if { $stat == 0 } {
- set stat [catch {eval txn001_subb $ntxns $env} res]
- }
- error_check_good envclose [$env close] 0
- } else {
- set stat [catch {
- foreach test $test_names {
- check_handles
- if { [info exists parms($test)] != 1 } {
- puts stderr "$test disabled in \
- testparams.tcl; skipping."
- continue
- }
- remote_cleanup $rpc_server $rpc_testdir $testdir
- #
- # Set server cachesize to 1Mb. Otherwise some
- # tests won't fit (like test084 -btree).
- #
- set env [eval {berkdb_env -create -mode 0644 \
- -home $home -server $rpc_server \
- -client_timeout 10000 \
- -cachesize {0 1048576 1} }]
- error_check_good env_open \
- [is_valid_env $env] TRUE
- append largs " -env $env "
-
- puts "[timestamp]"
- eval $test $method $parms($test) $largs
- if { $__debug_print != 0 } {
- puts ""
- }
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- flush stdout
- flush stderr
- set largs $save_largs
- error_check_good envclose [$env close] 0
- }
- } res]
- }
- if { $stat != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
-		error "FAIL:[timestamp]\
-		    run_rpcnoserver: $method: $theError"
- } else {
- error $theError;
- }
- set is_envmethod 0
- }
-
-}
-
-#
-# Run method tests in secure mode.
-#
-proc run_secmethod { method test {display 0} {run 1} \
- { outfile stdout } args } {
- global passwd
- global has_crypto
-
- # Skip secure mode tests if release does not support encryption.
- if { $has_crypto == 0 } {
- return
- }
-
- set largs $args
- append largs " -encryptaes $passwd "
- eval run_method $method $test $display $run $outfile $largs
-}
-
-#
-# Run method tests each in its own, new secure environment.
-#
-proc run_secenv { method test {largs ""} } {
- global __debug_on
- global __debug_print
- global __debug_test
- global is_envmethod
- global has_crypto
- global test_names
- global parms
- global passwd
- source ./include.tcl
-
- # Skip secure mode tests if release does not support encryption.
- if { $has_crypto == 0 } {
- return
- }
-
- puts "run_secenv: $method $test $largs"
-
- set save_largs $largs
- env_cleanup $testdir
- set is_envmethod 1
- set stat [catch {
- check_handles
- set env [eval {berkdb_env -create -mode 0644 -home $testdir \
- -encryptaes $passwd -cachesize {0 1048576 1}}]
- error_check_good env_open [is_valid_env $env] TRUE
- append largs " -env $env "
-
- puts "[timestamp]"
- if { [info exists parms($test)] != 1 } {
- puts stderr "$test disabled in\
- testparams.tcl; skipping."
- continue
- }
-
- #
- # Run each test multiple times in the secure env.
- # Once with a secure env + clear database
- # Once with a secure env + secure database
- #
- eval $test $method $parms($test) $largs
- append largs " -encrypt "
- eval $test $method $parms($test) $largs
-
- if { $__debug_print != 0 } {
- puts ""
- }
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- flush stdout
- flush stderr
- set largs $save_largs
- error_check_good envclose [$env close] 0
- error_check_good envremove [berkdb envremove \
- -home $testdir -encryptaes $passwd] 0
- } res]
- if { $stat != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
- error "FAIL:[timestamp]\
- run_secenv: $method $test: $theError"
- } else {
- error $theError;
- }
- set is_envmethod 0
- }
-
-}
-
-#
-# Run replication method tests in master and client env.
-#
-proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
- {do_sec 0} {do_oob 0} {largs "" } } {
- source ./include.tcl
- global __debug_on
- global __debug_print
- global __debug_test
- global is_envmethod
- global parms
- global passwd
- global has_crypto
-
- puts "run_reptest: $method $test $droppct $nclients $do_del $do_sec $do_oob $largs"
-
- env_cleanup $testdir
- set is_envmethod 1
- set stat [catch {
- if { $do_sec && $has_crypto } {
- set envargs "-encryptaes $passwd"
- append largs " -encrypt "
- } else {
- set envargs ""
- }
- check_handles
- #
- # This will set up the master and client envs
- # and will return us the args to pass to the
- # test.
-
- set largs [repl_envsetup \
- $envargs $largs $test $nclients $droppct $do_oob]
-
- puts "[timestamp]"
- if { [info exists parms($test)] != 1 } {
- puts stderr "$test disabled in\
- testparams.tcl; skipping."
- continue
- }
- puts -nonewline \
- "Repl: $test: dropping $droppct%, $nclients clients "
- if { $do_del } {
- puts -nonewline " with delete verification;"
- } else {
- puts -nonewline " no delete verification;"
- }
- if { $do_sec } {
- puts -nonewline " with security;"
- } else {
- puts -nonewline " no security;"
- }
- if { $do_oob } {
- puts -nonewline " with out-of-order msgs;"
- } else {
- puts -nonewline " no out-of-order msgs;"
- }
- puts ""
-
- eval $test $method $parms($test) $largs
-
- if { $__debug_print != 0 } {
- puts ""
- }
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- flush stdout
- flush stderr
- repl_envprocq $test $nclients $do_oob
- repl_envver0 $test $method $nclients
- if { $do_del } {
- repl_verdel $test $method $nclients
- }
- repl_envclose $test $envargs
- } res]
- if { $stat != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
- error "FAIL:[timestamp]\
- run_reptest: $method $test: $theError"
- } else {
- error $theError;
- }
- }
- set is_envmethod 0
-}
-
-#
-# Run replication method tests in master and client env.
-#
-proc run_repmethod { method test {numcl 0} {display 0} {run 1} \
- {outfile stdout} {largs ""} } {
- source ./include.tcl
- global __debug_on
- global __debug_print
- global __debug_test
- global is_envmethod
- global test_names
- global parms
- global has_crypto
- global passwd
-
- set save_largs $largs
- env_cleanup $testdir
-
-	# Use a weighted list for the number of clients because we really
-	# don't want to evenly-weight all numbers of clients.  Favor
-	# smaller numbers but test more clients occasionally.
- set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
- set drop_len [expr [llength $drop_list] - 1]
- set client_list { 1 1 2 1 1 1 2 2 3 1 }
- set cl_len [expr [llength $client_list] - 1]
-
- if { $numcl == 0 } {
- set clindex [berkdb random_int 0 $cl_len]
- set nclients [lindex $client_list $clindex]
- } else {
- set nclients $numcl
- }
- set drindex [berkdb random_int 0 $drop_len]
- set droppct [lindex $drop_list $drindex]
- set do_sec [berkdb random_int 0 1]
- set do_oob [berkdb random_int 0 1]
- set do_del [berkdb random_int 0 1]
-
- if { $display == 1 } {
- puts $outfile "eval run_reptest $method $test $droppct \
- $nclients $do_del $do_sec $do_oob $largs"
- }
- if { $run == 1 } {
- run_reptest $method $test $droppct $nclients $do_del \
- $do_sec $do_oob $largs
- }
-}
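# The drop_list/client_list scheme above is plain weighted random
# selection: repeat a value as many times as you want it favored, then
# index with a uniform random number. The same idea in stock Tcl (using
# rand() rather than berkdb random_int):
set client_list { 1 1 2 1 1 1 2 2 3 1 }
set nclients [lindex $client_list \
    [expr {int(rand() * [llength $client_list])}]]
# nclients is 1 with probability 6/10, 2 with 3/10, 3 with 1/10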
-
-#
-# Run method tests, each in its own, new environment. (As opposed to
-# run_envmethod1 which runs all the tests in a single environment.)
-#
-proc run_envmethod { method test {display 0} {run 1} {outfile stdout} \
- { largs "" } } {
- global __debug_on
- global __debug_print
- global __debug_test
- global is_envmethod
- global test_names
- global parms
- source ./include.tcl
-
- set save_largs $largs
- set envargs ""
- env_cleanup $testdir
-
- if { $display == 1 } {
- puts $outfile "eval run_envmethod $method \
- $test 0 1 stdout $largs"
- }
-
- # To run a normal test using system memory, call run_envmethod
- # with the flag -shm.
- set sindex [lsearch -exact $largs "-shm"]
- if { $sindex >= 0 } {
- if { [mem_chk " -system_mem -shm_key 1 "] == 1 } {
-			return
- } else {
- append envargs " -system_mem -shm_key 1 "
- set largs [lreplace $largs $sindex $sindex]
- }
- }
-
- # Test for -thread option and pass to berkdb_env open. Leave in
- # $largs because -thread can also be passed to an individual
- # test as an arg. Double the number of lockers because a threaded
- # env requires more than an ordinary env.
- if { [lsearch -exact $largs "-thread"] != -1 } {
- append envargs " -thread -lock_max_lockers 2000 "
- }
-
- # Test for -alloc option and pass to berkdb_env open only.
- # Remove from largs because -alloc is not an allowed test arg.
- set aindex [lsearch -exact $largs "-alloc"]
- if { $aindex >= 0 } {
- append envargs " -alloc "
- set largs [lreplace $largs $aindex $aindex]
- }
-
- if { $run == 1 } {
- set is_envmethod 1
- set stat [catch {
- check_handles
- set env [eval {berkdb_env -create -txn \
- -mode 0644 -home $testdir} $envargs]
- error_check_good env_open [is_valid_env $env] TRUE
- append largs " -env $env "
-
- puts "[timestamp]"
- if { [info exists parms($test)] != 1 } {
- puts stderr "$test disabled in\
- testparams.tcl; skipping."
- continue
- }
- eval $test $method $parms($test) $largs
-
- if { $__debug_print != 0 } {
- puts ""
- }
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- flush stdout
- flush stderr
- set largs $save_largs
- error_check_good envclose [$env close] 0
- error_check_good envremove [berkdb envremove \
- -home $testdir] 0
- } res]
- if { $stat != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
- error "FAIL:[timestamp]\
- run_envmethod: $method $test: $theError"
- } else {
- error $theError;
- }
- }
- set is_envmethod 0
- }
-}
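# A sketch of the three special env flags handled above (hypothetical
# invocations; -shm and -alloc are consumed here, while -thread is both
# passed to berkdb_env and left in the test args):
#
#	run_envmethod -btree test001 0 1 stdout -shm	;# system memory
#	run_envmethod -btree test001 0 1 stdout -thread	;# threaded env
#	run_envmethod -btree test001 0 1 stdout -alloc	;# env with -alloc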
-
-proc run_recd { method test {run 1} {display 0} args } {
- global __debug_on
- global __debug_print
- global __debug_test
- global parms
- global test_names
- global log_log_record_types
- global gen_upgrade_log
- global upgrade_be
- global upgrade_dir
- global upgrade_method
- global upgrade_name
- source ./include.tcl
-
- if { $run == 1 } {
- puts "run_recd: $method $test $parms($test) $args"
- }
- if {[catch {
- if { $display } {
- puts "eval $test $method $parms($test) $args"
- }
- if { $run } {
- check_handles
- set upgrade_method $method
- set upgrade_name $test
- puts "[timestamp]"
- # By redirecting stdout to stdout, we make exec
- # print output rather than simply returning it.
- # By redirecting stderr to stdout too, we make
- # sure everything winds up in the ALL.OUT file.
- set ret [catch { exec $tclsh_path << \
- "source $test_path/test.tcl; \
- set log_log_record_types $log_log_record_types;\
- set gen_upgrade_log $gen_upgrade_log;\
- set upgrade_be $upgrade_be; \
- set upgrade_dir $upgrade_dir; \
- set upgrade_method $upgrade_method; \
- set upgrade_name $upgrade_name; \
- eval $test $method $parms($test) $args" \
- >&@ stdout
- } res]
-
- # Don't die if the test failed; we want
- # to just proceed.
- if { $ret != 0 } {
- puts "FAIL:[timestamp] $res"
- }
-
- if { $__debug_print != 0 } {
- puts ""
- }
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- flush stdout
- flush stderr
- }
- } res] != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
- error "FAIL:[timestamp]\
- run_recd: $method: $theError"
- } else {
- error $theError;
- }
- }
-}
-
-proc run_recds { {run 1} {display 0} args } {
- source ./include.tcl
- global log_log_record_types
- global test_names
- global gen_upgrade_log
- global encrypt
-
- set log_log_record_types 1
- logtrack_init
-
- foreach method \
- "btree rbtree hash queue queueext recno frecno rrecno" {
- check_handles
-#set test_names(recd) "recd005 recd017"
- foreach test $test_names(recd) {
- # Skip recd017 for non-crypto upgrade testing.
- # Run only recd017 for crypto upgrade testing.
- if { $gen_upgrade_log == 1 && $test == "recd017" && \
- $encrypt == 0 } {
- puts "Skipping recd017 for non-crypto run."
- continue
- }
- if { $gen_upgrade_log == 1 && $test != "recd017" && \
- $encrypt == 1 } {
- puts "Skipping $test for crypto run."
- continue
- }
- if { [catch {eval \
- run_recd $method $test $run $display \
- $args} ret ] != 0 } {
- puts $ret
- }
- if { $gen_upgrade_log == 1 } {
- save_upgrade_files $testdir
- }
- }
- }
- # We can skip logtrack_summary during the crypto upgrade run -
- # it doesn't introduce any new log types.
- if { $run } {
- if { $gen_upgrade_log == 0 || $encrypt == 0 } {
- logtrack_summary
- }
- }
- set log_log_record_types 0
-}
-
-proc run_all { { testname ALL } args } {
- global test_names
- global one_test
- global has_crypto
- source ./include.tcl
-
- fileremove -f ALL.OUT
-
- set one_test $testname
- if { $one_test != "ALL" } {
- # Source testparams again to adjust test_names.
- source $test_path/testparams.tcl
- }
-
- set exflgs [eval extractflags $args]
- set flags [lindex $exflgs 1]
- set display 1
- set run 1
- set am_only 0
- set parallel 0
- set nparalleltests 0
- set rflags {--}
- foreach f $flags {
- switch $f {
- m {
- set am_only 1
- }
- n {
- set display 1
- set run 0
- set rflags [linsert $rflags 0 "-n"]
- }
- }
- }
-
- set o [open ALL.OUT a]
- if { $run == 1 } {
- puts -nonewline "Test suite run started at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
- puts [berkdb version -string]
-
- puts -nonewline $o "Test suite run started at: "
- puts $o [clock format [clock seconds] -format "%H:%M %D"]
- puts $o [berkdb version -string]
- }
- close $o
- #
- # First run standard tests. Send in a -A to let run_std know
- # that it is part of the "run_all" run, so that it doesn't
- # print out start/end times.
- #
- lappend args -A
- eval {run_std} $one_test $args
-
- set test_pagesizes [get_test_pagesizes]
- set args [lindex $exflgs 0]
- set save_args $args
-
- foreach pgsz $test_pagesizes {
- set args $save_args
- append args " -pagesize $pgsz -chksum"
- if { $am_only == 0 } {
- # Run recovery tests.
- #
- # XXX These don't actually work at multiple pagesizes;
- # disable them for now.
- #
- # XXX These too are broken into separate tclsh
- # instantiations so we don't require so much
- # memory, but I think it's cleaner
- # and more useful to do it down inside proc r than here,
- # since "r recd" gets done a lot and needs to work.
- #
- # XXX See comment in run_std for why this only directs
- # stdout and not stderr. Don't worry--the right stuff
- # happens.
- #puts "Running recovery tests with pagesize $pgsz"
- #if [catch {exec $tclsh_path \
- # << "source $test_path/test.tcl; \
- # r $rflags recd $args" \
- # 2>@ stderr >> ALL.OUT } res] {
- # set o [open ALL.OUT a]
- # puts $o "FAIL: recd test:"
- # puts $o $res
- # close $o
- #}
- }
-
- # Access method tests.
- # Run subdb tests with varying pagesizes too.
- # XXX
- # Broken up into separate tclsh instantiations so
- # we don't require so much memory.
- foreach method \
- "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $method tests with pagesize $pgsz"
- foreach sub {test sdb} {
- foreach test $test_names($sub) {
- if { $run == 0 } {
- set o [open ALL.OUT a]
- eval {run_method -$method \
- $test $display $run $o} \
- $args
- close $o
- }
- if { $run } {
- if [catch {exec $tclsh_path << \
- "global one_test; \
- set one_test $one_test; \
- source $test_path/test.tcl; \
- eval {run_method -$method \
- $test $display $run \
- stdout} $args" \
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: \
- -$method $test: $res"
- close $o
- }
- }
- }
- }
- }
- }
- set args $save_args
- #
- # Run access method tests at default page size in one env.
- #
- foreach method "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $method tests in a txn env"
- foreach sub {test sdb} {
- foreach test $test_names($sub) {
- if { $run == 0 } {
- set o [open ALL.OUT a]
- run_envmethod -$method $test $display \
- $run $o $args
- close $o
- }
- if { $run } {
- if [catch {exec $tclsh_path << \
- "global one_test; \
- set one_test $one_test; \
- source $test_path/test.tcl; \
- run_envmethod -$method $test \
- $display $run stdout $args" \
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: run_envmethod \
- $method $test: $res"
- close $o
- }
- }
- }
- }
- }
- #
- # Run access method tests at default page size in thread-enabled env.
- # We're not truly running threaded tests, just testing the interface.
- #
- foreach method "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $method tests in a threaded txn env"
- foreach sub {test sdb} {
- foreach test $test_names($sub) {
- if { $run == 0 } {
- set o [open ALL.OUT a]
- eval {run_envmethod -$method $test \
- $display $run $o -thread}
- close $o
- }
- if { $run } {
- if [catch {exec $tclsh_path << \
- "global one_test; \
- set one_test $one_test; \
- source $test_path/test.tcl; \
- eval {run_envmethod -$method $test \
- $display $run stdout -thread}" \
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: run_envmethod \
- $method $test -thread: $res"
- close $o
- }
- }
- }
- }
- }
- #
- # Run access method tests at default page size with -alloc enabled.
- #
- foreach method "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $method tests in an env with -alloc"
- foreach sub {test sdb} {
- foreach test $test_names($sub) {
- if { $run == 0 } {
- set o [open ALL.OUT a]
- eval {run_envmethod -$method $test \
- $display $run $o -alloc}
- close $o
- }
- if { $run } {
- if [catch {exec $tclsh_path << \
- "global one_test; \
- set one_test $one_test; \
- source $test_path/test.tcl; \
- eval {run_envmethod -$method $test \
- $display $run stdout -alloc}" \
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: run_envmethod \
- $method $test -alloc: $res"
- close $o
- }
- }
- }
- }
- }
- #
- # Run tests using proc r. The replication tests have been
- # moved from run_std to run_all.
- #
- set test_list [list {"replication" "rep"}]
- #
- # If release supports encryption, run security tests.
- #
- if { $has_crypto == 1 } {
- lappend test_list {"security" "sec"}
- }
- #
- # If configured for RPC, then run rpc tests too.
- #
- if { [file exists ./berkeley_db_svc] ||
- [file exists ./berkeley_db_cxxsvc] ||
- [file exists ./berkeley_db_javasvc] } {
- lappend test_list {"RPC" "rpc"}
- }
-
- foreach pair $test_list {
- set msg [lindex $pair 0]
- set cmd [lindex $pair 1]
- puts "Running $msg tests"
- if [catch {exec $tclsh_path << \
- "global one_test; set one_test $one_test; \
- source $test_path/test.tcl; \
- r $rflags $cmd $args" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: $cmd test: $res"
- close $o
- }
- }
-
- # If not actually running, no need to check for failure.
- if { $run == 0 } {
- return
- }
-
- set failed 0
- set o [open ALL.OUT r]
- while { [gets $o line] >= 0 } {
- if { [regexp {^FAIL} $line] != 0 } {
- set failed 1
- }
- }
- close $o
- set o [open ALL.OUT a]
- if { $failed == 0 } {
- puts "Regression Tests Succeeded"
- puts $o "Regression Tests Succeeded"
- } else {
- puts "Regression Tests Failed; see ALL.OUT for log"
- puts $o "Regression Tests Failed"
- }
-
- puts -nonewline "Test suite run completed at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
- puts -nonewline $o "Test suite run completed at: "
- puts $o [clock format [clock seconds] -format "%H:%M %D"]
- close $o
-}
-
-#
-# Run method tests in one environment. (As opposed to run_envmethod
-# which runs each test in its own, new environment.)
-#
-proc run_envmethod1 { method {display 0} {run 1} { outfile stdout } args } {
- global __debug_on
- global __debug_print
- global __debug_test
- global is_envmethod
- global test_names
- global parms
- source ./include.tcl
-
- if { $run == 1 } {
- puts "run_envmethod1: $method $args"
- }
-
- set is_envmethod 1
- if { $run == 1 } {
- check_handles
- env_cleanup $testdir
- error_check_good envremove [berkdb envremove -home $testdir] 0
- set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
- {-mode 0644 -home $testdir}]
- error_check_good env_open [is_valid_env $env] TRUE
- append largs " -env $env "
- }
-
- if { $display } {
- # The envmethod1 tests can't be split up, since they share
- # an env.
- puts $outfile "eval run_envmethod1 $method $args"
- }
-
- set stat [catch {
- foreach test $test_names(test) {
- if { [info exists parms($test)] != 1 } {
- puts stderr "$test disabled in\
- testparams.tcl; skipping."
- continue
- }
- if { $run } {
- puts $outfile "[timestamp]"
- eval $test $method $parms($test) $largs
- if { $__debug_print != 0 } {
- puts $outfile ""
- }
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- }
- flush stdout
- flush stderr
- }
- } res]
- if { $stat != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
-		error "FAIL:[timestamp]\
-		    run_envmethod1: $method $test: $theError"
- } else {
- error $theError;
- }
- }
- set stat [catch {
- foreach test $test_names(test) {
- if { [info exists parms($test)] != 1 } {
- puts stderr "$test disabled in\
- testparams.tcl; skipping."
- continue
- }
- if { $run } {
- puts $outfile "[timestamp]"
- eval $test $method $parms($test) $largs
- if { $__debug_print != 0 } {
- puts $outfile ""
- }
- if { $__debug_on != 0 } {
- debug $__debug_test
- }
- }
- flush stdout
- flush stderr
- }
- } res]
- if { $stat != 0} {
- global errorInfo;
-
- set fnl [string first "\n" $errorInfo]
- set theError [string range $errorInfo 0 [expr $fnl - 1]]
- if {[string first FAIL $errorInfo] == -1} {
- error "FAIL:[timestamp]\
- run_envmethod1: $method $test: $theError"
- } else {
- error $theError;
- }
- }
- if { $run == 1 } {
- error_check_good envclose [$env close] 0
- check_handles $outfile
- }
- set is_envmethod 0
-
-}
-
-# Run the secondary index tests.
-proc sindex { {display 0} {run 1} {outfile stdout} {verbose 0} args } {
- global test_names
- global testdir
- global verbose_check_secondaries
- set verbose_check_secondaries $verbose
- # Standard number of secondary indices to create if a single-element
- # list of methods is passed into the secondary index tests.
- global nsecondaries
- set nsecondaries 2
-
- # Run basic tests with a single secondary index and a small number
- # of keys, then again with a larger number of keys. (Note that
- # we can't go above 5000, since we use two items from our
- # 10K-word list for each key/data pair.)
- foreach n { 200 5000 } {
- foreach pm { btree hash recno frecno queue queueext } {
- foreach sm { dbtree dhash ddbtree ddhash btree hash } {
- foreach test $test_names(si) {
- if { $display } {
- puts -nonewline $outfile \
- "eval $test {\[list\
- $pm $sm $sm\]} $n ;"
- puts $outfile " verify_dir \
- $testdir \"\" 1"
- }
- if { $run } {
- check_handles $outfile
- eval $test \
- {[list $pm $sm $sm]} $n
- verify_dir $testdir "" 1
- }
- }
- }
- }
- }
-
- # Run tests with 20 secondaries.
- foreach pm { btree hash } {
- set methlist [list $pm]
- for { set j 1 } { $j <= 20 } {incr j} {
- # XXX this should incorporate hash after #3726
- if { $j % 2 == 0 } {
- lappend methlist "dbtree"
- } else {
- lappend methlist "ddbtree"
- }
- }
- foreach test $test_names(si) {
- if { $display } {
- puts "eval $test {\[list $methlist\]} 500"
- }
- if { $run } {
- eval $test {$methlist} 500
- }
- }
- }
-}
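# The {[list $pm $sm $sm]} construction hands each secondary-index test
# a methods list: one primary followed by the secondaries. For example
# (hypothetical test name from test_names(si)):
#
#	si001 [list btree dbtree dbtree] 200
#
# would run si001 with a btree primary and two dbtree secondaries over
# 200 keys; the 20-secondary loop above just builds a longer list the
# same way.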
-
-# Run the secondary index join test.  (There's no point in running
-# this with both record lengths: the primary is unhappy for now with
-# fixed-length records (XXX), and we need unsorted dups in the
-# secondaries.)
-proc sijoin { {display 0} {run 1} {outfile stdout} } {
- foreach pm { btree hash recno } {
- if { $display } {
- foreach sm { btree hash } {
- puts $outfile "eval sijointest\
- {\[list $pm $sm $sm\]} 1000"
- }
- puts $outfile "eval sijointest\
- {\[list $pm btree hash\]} 1000"
- puts $outfile "eval sijointest\
- {\[list $pm hash btree\]} 1000"
- }
- if { $run } {
- foreach sm { btree hash } {
- eval sijointest {[list $pm $sm $sm]} 1000
- }
- eval sijointest {[list $pm btree hash]} 1000
- eval sijointest {[list $pm hash btree]} 1000
- }
- }
-}
-
-proc run { proc_suffix method {start 1} {stop 999} } {
- global test_names
-
- switch -exact -- $proc_suffix {
- envmethod -
- method -
- recd -
- repmethod -
- reptest -
- secenv -
- secmethod {
- # Run_recd runs the recd tests, all others
- # run the "testxxx" tests.
- if { $proc_suffix == "recd" } {
- set testtype recd
- } else {
- set testtype test
- }
-
- for { set i $start } { $i <= $stop } { incr i } {
- set name [format "%s%03d" $testtype $i]
- # If a test number is missing, silently skip
- # to next test; sparse numbering is allowed.
- if { [lsearch -exact $test_names($testtype) \
- $name] == -1 } {
- continue
- }
- run_$proc_suffix $method $name
- }
- }
- default {
-			puts "$proc_suffix is not set up to be used with run"
- }
- }
-}
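# The format "%s%03d" call maps a numeric range onto zero-padded test
# names; sparse numbers are simply skipped by the lsearch guard above.
puts [format "%s%03d" test 1]	;# prints test001
puts [format "%s%03d" recd 17]	;# prints recd017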
-
-
-# We want to test all of 512b, 8Kb, and 64Kb pages, but chances are one
-# of these is the default pagesize.  We don't want to re-run all the AM
-# tests at the default size, so figure out what the default page size
-# is, then return the other two.
-proc get_test_pagesizes { } {
- # Create an in-memory database.
- set db [berkdb_open -create -btree]
- error_check_good gtp_create [is_valid_db $db] TRUE
- set statret [$db stat]
- set pgsz 0
- foreach pair $statret {
- set fld [lindex $pair 0]
- if { [string compare $fld {Page size}] == 0 } {
- set pgsz [lindex $pair 1]
- }
- }
-
- error_check_good gtp_close [$db close] 0
-
- error_check_bad gtp_pgsz $pgsz 0
- switch $pgsz {
- 512 { return {8192 65536} }
- 8192 { return {512 65536} }
- 65536 { return {512 8192} }
- default { return {512 8192 65536} }
- }
- error_check_good NOTREACHED 0 1
-}
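# A sketch of the intended use, also visible in run_all above: if the
# build's default page size is 8192, this returns {512 65536}, and the
# pagesize loop re-runs the access method tests only at the two
# non-default sizes.
#
#	foreach pgsz [get_test_pagesizes] {
#		puts "would re-run AM tests with -pagesize $pgsz"
#	}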
diff --git a/storage/bdb/test/test001.tcl b/storage/bdb/test/test001.tcl
deleted file mode 100644
index 2d7130fcbbc..00000000000
--- a/storage/bdb/test/test001.tcl
+++ /dev/null
@@ -1,222 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test001.tcl,v 11.38 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test001
-# TEST Small keys/data
-# TEST Put/get per key
-# TEST Dump file
-# TEST Close, reopen
-# TEST Dump file
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and data; retrieve each.
-# TEST After all are entered, retrieve all; compare output to original.
-# TEST Close file, reopen, do retrieve and re-verify.
-proc test001 { method {nentries 10000} \
- {start 0} {skip 0} {tnum "001"} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- # Create the database and open the dictionary
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- # If we are not using an external env, then test setting
- # the database cache size and using multiple caches.
- set txnenv 0
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- append args " -cachesize {0 1048576 3} "
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- puts "Test$tnum: $method ($args) $nentries equal key/data pairs"
- set did [open $dict]
-
- # The "start" variable determines the record number to start
- # with, if we're using record numbers. The "skip" variable
- # determines the dictionary entry to start with.
- # In normal use, skip will match start.
-
- puts "\tTest$tnum: Starting at $start with dictionary entry $skip"
- if { $skip != 0 } {
- for { set count 0 } { $count < $skip } { incr count } {
- gets $did str
- }
- }
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set temp $testdir/temp
- cleanup $testdir $env
-
- set db [eval {berkdb_open \
- -create -mode 0644} $args $omethod $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
-
- if { [is_record_based $method] == 1 } {
- set checkfunc test001_recno.check
- append gflags " -recno"
- } else {
- set checkfunc test001.check
- }
- puts "\tTest$tnum.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1 + $start]
- if { 0xffffffff > 0 && $key > 0xffffffff } {
- set key [expr $key - 0x100000000]
- }
- if { $key == 0 || $key - 0xffffffff == 1 } {
- incr key
- incr count
- }
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- set str [reverse $str]
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval \
- {$db put} $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- if { $count % 50 == 0 } {
- error_check_good txn_checkpoint($count) \
- [$env txn_checkpoint] 0
- }
- }
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
-
- # Test DB_GET_BOTH for success
- set ret [$db get -get_both $key [pad_data $method $str]]
- error_check_good \
- getboth $ret [list [list $key [pad_data $method $str]]]
-
- # Test DB_GET_BOTH for failure
- set ret [$db get -get_both $key [pad_data $method BAD$str]]
- error_check_good getbothBAD [llength $ret] 0
-
- incr count
- }
- close $did
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- # Now we will get each key from the DB and compare the results
- # to the original.
-
- puts "\tTest$tnum.b: dump file"
- dump_file $db $txn $t1 $checkfunc
- #
- # dump_file should just have been "get" calls, so
- # aborting a get should really be a no-op. Abort
- # just for the fun of it.
- if { $txnenv == 1 } {
- error_check_good txn [$t abort] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for { set i 1 } { $i <= $nentries } { incr i } {
- set j [expr $i + $start]
- if { 0xffffffff > 0 && $j > 0xffffffff } {
- set j [expr $j - 0x100000000]
- }
- if { $j == 0 } {
- incr i
- incr j
- }
- puts $oid $j
- }
- close $oid
- } else {
- filehead [expr $nentries + $start] $dict $t2 [expr $start + 1]
- }
- filesort $t2 $temp
- file rename -force $temp $t2
- filesort $t1 $t3
-
- error_check_good Test$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tTest$tnum.c: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $t1 $checkfunc \
- dump_file_direction "-first" "-next"
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t1 $t3
- }
-
- error_check_good Test$tnum:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tTest$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 $checkfunc \
- dump_file_direction "-last" "-prev"
-
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t1 $t3
- }
-
- error_check_good Test$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-}
-
-# Check function for test001; data should be the reverse of the key
-proc test001.check { key data } {
- error_check_good "key/data mismatch" $data [reverse $key]
-}
-
-proc test001_recno.check { key data } {
- global dict
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
-}
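# The key arithmetic in test001's put/get loop keeps recno keys inside
# the unsigned 32-bit range: keys past 0xffffffff wrap around by
# 0x100000000, and key 0 (illegal for record numbers) is bumped. A
# simplified sketch (the "0xffffffff > 0" guard in the real loop
# protects 32-bit Tcl builds, where the literal overflows):
set key [expr 0xffffffff + 1]
if { $key > 0xffffffff } {
	set key [expr $key - 0x100000000]
}
if { $key == 0 } { incr key }
puts $key	;# prints 1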
diff --git a/storage/bdb/test/test002.tcl b/storage/bdb/test/test002.tcl
deleted file mode 100644
index 265f0640ff3..00000000000
--- a/storage/bdb/test/test002.tcl
+++ /dev/null
@@ -1,161 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test002.tcl,v 11.21 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test002
-# TEST Small keys/medium data
-# TEST Put/get per key
-# TEST Dump file
-# TEST Close, reopen
-# TEST Dump file
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and a fixed, medium length data string;
-# TEST retrieve each. After all are entered, retrieve all; compare output
-# TEST to original. Close file, reopen, do retrieve and re-verify.
-
-proc test002 { method {nentries 10000} args } {
- global datastr
- global pad_datastr
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test002.db
- set env NULL
- } else {
- set testfile test002.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- # Create the database and open the dictionary
- puts "Test002: $method ($args) $nentries key <fixed data> pairs"
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
-
- if { [is_record_based $method] == 1 } {
- append gflags "-recno"
- }
- set pad_datastr [pad_data $method $datastr]
- puts "\tTest002.a: put/get loop"
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set ret [eval {$db get} $gflags {$key}]
-
- error_check_good get $ret [list [list $key [pad_data $method $datastr]]]
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest002.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 test002.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
-		for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid $i
- }
- close $oid
- filesort $t2 $t3
- file rename -force $t3 $t2
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- }
- filesort $t1 $t3
-
- error_check_good Test002:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again.
- puts "\tTest002.c: close, open, and dump file"
- open_and_dump_file $testfile $env $t1 test002.check \
- dump_file_direction "-first" "-next"
-
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t1 $t3
- }
- error_check_good Test002:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again in reverse direction.
- puts "\tTest002.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 test002.check \
- dump_file_direction "-last" "-prev"
-
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t1 $t3
- }
- error_check_good Test002:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-}
-
-# Check function for test002; data should be the fixed (padded) string
-proc test002.check { key data } {
- global pad_datastr
- error_check_good "data mismatch for key $key" $data $pad_datastr
-}
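# test002, like most tests in this suite, verifies with the same
# three-step pattern: dump the database to a text file, build the
# expected key list, then sort both sides and compare. In outline (all
# helpers are defined elsewhere in this test suite):
#
#	dump_file $db $txn $t1 test002.check	;# actual keys -> t1
#	filehead $nentries $dict $t3		;# expected keys -> t3
#	filesort $t3 $t2; filesort $t1 $t3	;# canonical order
#	filecmp $t3 $t2				;# 0 means identical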
diff --git a/storage/bdb/test/test003.tcl b/storage/bdb/test/test003.tcl
deleted file mode 100644
index 4e9c0125d3c..00000000000
--- a/storage/bdb/test/test003.tcl
+++ /dev/null
@@ -1,205 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test003.tcl,v 11.28 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test003
-# TEST Small keys/large data
-# TEST Put/get per key
-# TEST Dump file
-# TEST Close, reopen
-# TEST Dump file
-# TEST
-# TEST Take the source files and dbtest executable and enter their names
-# TEST as the key with their contents as data. After all are entered,
-# TEST retrieve all; compare output to original. Close file, reopen, do
-# TEST retrieve and re-verify.
-proc test003 { method args} {
- global names
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if {[is_fixed_length $method] == 1} {
- puts "Test003 skipping for method $method"
- return
- }
- puts "Test003: $method ($args) filename=key filecontents=data pairs"
-
- # Create the database and open the dictionary
- set limit 0
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test003.db
- set env NULL
- } else {
- set testfile test003.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- set limit 100
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
-
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args $omethod $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set pflags ""
- set gflags ""
- set txn ""
- if { [is_record_based $method] == 1 } {
- set checkfunc test003_recno.check
- append gflags "-recno"
- } else {
- set checkfunc test003.check
- }
-
- # Here is the loop where we put and get each key/data pair
- set file_list [get_file_list]
- set len [llength $file_list]
- puts "\tTest003.a: put/get loop $len entries"
- set count 0
- foreach f $file_list {
- if { [string compare [file type $f] "file"] != 0 } {
- continue
- }
-
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- set names([expr $count + 1]) $f
- } else {
- set key $f
- }
-
- # Should really catch errors
- set fid [open $f r]
- fconfigure $fid -translation binary
- set data [read $fid]
- close $fid
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $data]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Should really catch errors
- set fid [open $t4 w]
- fconfigure $fid -translation binary
- if [catch {eval {$db get} $gflags {$key}} data] {
- puts -nonewline $fid $data
- } else {
- # Data looks like {{key data}}
- set key [lindex [lindex $data 0] 0]
- set data [lindex [lindex $data 0] 1]
- puts -nonewline $fid [pad_data $method $data]
- }
- close $fid
-
- error_check_good \
- Test003:diff($f,$t4) [filecmp $f $t4] 0
-
- incr count
- }
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest003.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_bin_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the entries in the
- # current directory
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
-		for {set i 1} {$i <= $count} {incr i} {
- puts $oid $i
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set oid [open $t2.tmp w]
- foreach f $file_list {
- if { [string compare [file type $f] "file"] != 0 } {
- continue
- }
- puts $oid $f
- }
- close $oid
- filesort $t2.tmp $t2
- fileremove $t2.tmp
- filesort $t1 $t3
- }
-
- error_check_good \
- Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again.
- puts "\tTest003.c: close, open, and dump file"
- open_and_dump_file $testfile $env $t1 $checkfunc \
- dump_bin_file_direction "-first" "-next"
-
- if { [is_record_based $method] == 1 } {
- filesort $t1 $t3 -n
- }
-
- error_check_good \
- Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again in reverse direction.
- puts "\tTest003.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 $checkfunc \
- dump_bin_file_direction "-last" "-prev"
-
- if { [is_record_based $method] == 1 } {
- filesort $t1 $t3 -n
- }
-
- error_check_good \
- Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
-}
-
-# Check function for test003; key should be file name; data should be contents
-proc test003.check { binfile tmpfile } {
- source ./include.tcl
-
- error_check_good Test003:datamismatch($binfile,$tmpfile) \
- [filecmp $binfile $tmpfile] 0
-}
-proc test003_recno.check { binfile tmpfile } {
- global names
- source ./include.tcl
-
- set fname $names($binfile)
- error_check_good key"$binfile"_exists [info exists names($binfile)] 1
- error_check_good Test003:datamismatch($fname,$tmpfile) \
- [filecmp $fname $tmpfile] 0
-}
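# test003 round-trips whole files, so both the read side above and the
# write side must be in binary mode, or newline translation would break
# the byte-for-byte comparison. The core idiom (any readable file will
# do for the sketch):
set fid [open ./include.tcl r]
fconfigure $fid -translation binary
set data [read $fid]
close $fid
puts [string length $data]	;# exact byte length of the file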
diff --git a/storage/bdb/test/test004.tcl b/storage/bdb/test/test004.tcl
deleted file mode 100644
index 598e3eb2299..00000000000
--- a/storage/bdb/test/test004.tcl
+++ /dev/null
@@ -1,169 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test004.tcl,v 11.24 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test004
-# TEST Small keys/medium data
-# TEST Put/get per key
-# TEST Sequential (cursor) get/delete
-# TEST
-# TEST Check that cursor operations work. Create a database.
-# TEST Read through the database sequentially using cursors and
-# TEST delete each element.
-proc test004 { method {nentries 10000} {reopen "004"} {build_only 0} args} {
- source ./include.tcl
-
- set do_renumber [is_rrecno $method]
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- set tnum test$reopen
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/$tnum.db
- set env NULL
- } else {
- set testfile $tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
-
- puts -nonewline "$tnum:\
- $method ($args) $nentries delete small key; medium data pairs"
- if {$reopen == "005"} {
- puts "(with close)"
- } else {
- puts ""
- }
-
- # Create the database and open the dictionary
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
- set db [eval {berkdb_open -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
-
- # Here is the loop where we put and get each key/data pair
- set kvals ""
- puts "\tTest$reopen.a: put/get loop"
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- lappend kvals $str
- } else {
- set key $str
- }
-
- set datastr [ make_data_str $str ]
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn $pflags \
- {$key [chop_data $method $datastr]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good "$tnum:put" $ret \
- [list [list $key [pad_data $method $datastr]]]
- incr count
- }
- close $did
- if { $build_only == 1 } {
- return $db
- }
- if { $reopen == "005" } {
- error_check_good db_close [$db close] 0
-
- set db [eval {berkdb_open} $args {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- }
- puts "\tTest$reopen.b: get/delete loop"
- # Now we will get each key from the DB and compare the results
- # to the original, then delete it.
- set outf [open $t1 w]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set c [eval {$db cursor} $txn]
-
- set count 0
- for {set d [$c get -first] } { [llength $d] != 0 } {
- set d [$c get -next] } {
- set k [lindex [lindex $d 0] 0]
- set d2 [lindex [lindex $d 0] 1]
- if { [is_record_based $method] == 1 } {
- set datastr \
- [make_data_str [lindex $kvals [expr $k - 1]]]
- } else {
- set datastr [make_data_str $k]
- }
- error_check_good $tnum:$k $d2 [pad_data $method $datastr]
- puts $outf $k
- $c del
- if { [is_record_based $method] == 1 && \
- $do_renumber == 1 } {
- set kvals [lreplace $kvals 0 0]
- }
- incr count
- }
- close $outf
- error_check_good curs_close [$c close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Now compare the keys to see if they match the dictionary
- if { [is_record_based $method] == 1 } {
- error_check_good test$reopen:keys_deleted $count $nentries
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- error_check_good Test$reopen:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
- }
-
- error_check_good db_close [$db close] 0
-}
diff --git a/storage/bdb/test/test005.tcl b/storage/bdb/test/test005.tcl
deleted file mode 100644
index e3972b5b127..00000000000
--- a/storage/bdb/test/test005.tcl
+++ /dev/null
@@ -1,19 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test005.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test005
-# TEST Small keys/medium data
-# TEST Put/get per key
-# TEST Close, reopen
-# TEST Sequential (cursor) get/delete
-# TEST
-# TEST Check that cursor operations work. Create a database; close
-# TEST it and reopen it. Then read through the database sequentially
-# TEST using cursors and delete each element.
-proc test005 { method {nentries 10000} args } {
- eval {test004 $method $nentries "005" 0} $args
-}
diff --git a/storage/bdb/test/test006.tcl b/storage/bdb/test/test006.tcl
deleted file mode 100644
index 4107bbf090c..00000000000
--- a/storage/bdb/test/test006.tcl
+++ /dev/null
@@ -1,195 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test006.tcl,v 11.26 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test006
-# TEST Small keys/medium data
-# TEST Put/get per key
-# TEST Keyed delete and verify
-# TEST
-# TEST Keyed delete test.
-# TEST Create database.
-# TEST Go through database, deleting all entries by key.
-# TEST Then do the same for unsorted and sorted dups.
-proc test006 { method {nentries 10000} {reopen 0} {tnum "006"} \
- {ndups 5} args } {
-
- test006_body $method $nentries $reopen $tnum 1 "" "" $args
-
- # For methods supporting dups, run the test with sorted and
- # with unsorted dups.
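-	# That is, test006_body runs up to three times per method:
-	# once with no dup flags (ndups forced to 1 above), once with
-	# -dup (unsorted), and once with "-dup -dupsort" (sorted).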
- if { [is_btree $method] == 1 || [is_hash $method] == 1 } {
- foreach {sort flags} {unsorted -dup sorted "-dup -dupsort"} {
- test006_body $method $nentries $reopen \
- $tnum $ndups $sort $flags $args
- }
- }
-}
-
-proc test006_body { method {nentries 10000} {reopen 0} {tnum "006"} \
- {ndups 5} sort flags {largs ""} } {
- global is_je_test
- source ./include.tcl
-
- set do_renumber [is_rrecno $method]
- set largs [convert_args $method $largs]
- set omethod [convert_method $method]
-
- set tname Test$tnum
- set dbname test$tnum
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $largs "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set basename $testdir/$dbname
- set env NULL
- } else {
- set basename $dbname
- incr eindex
- set env [lindex $largs $eindex]
- if { $is_je_test && $sort == "unsorted" } {
- puts "Test$tnum skipping $sort duplicates for JE"
- return
- }
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append largs " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- puts -nonewline "$tname: $method ($flags $largs) "
- puts -nonewline "$nentries equal small key; medium data pairs"
- if {$reopen == 1} {
- puts " (with close)"
- } else {
- puts ""
- }
-
- set pflags ""
- set gflags ""
- set txn ""
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
-
- cleanup $testdir $env
-
- # Here is the loop where we put and get each key/data pair.
-
- set count 0
- set testfile $basename$sort.db
- set db [eval {berkdb_open -create \
- -mode 0644} $largs $flags {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\t$tname.a: put/get loop"
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1 ]
- } else {
- set key $str
- }
-
- set str [make_data_str $str]
- for { set j 1 } { $j <= $ndups } {incr j} {
- set datastr $j$str
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn \
- [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn $pflags \
- {$key [chop_data $method $datastr]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn \
- [$t commit] 0
- }
- }
- incr count
- }
- close $did
-
- # Close and reopen database, if testing reopen.
-
- if { $reopen == 1 } {
- error_check_good db_close [$db close] 0
-
- set db [eval {berkdb_open} $largs {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- }
-
- # Now we will get each key from the DB and compare the results
- # to the original, then delete it.
-
- puts "\t$tname.b: get/delete loop"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
- set i 1
- for { set ret [$dbc get -first] } \
- { [string length $ret] != 0 } \
- { set ret [$dbc get -next] } {
- set key [lindex [lindex $ret 0] 0]
- set data [lindex [lindex $ret 0] 1]
- if { $i == 1 } {
- set curkey $key
- }
- error_check_good seq_get:key $key $curkey
-
- if { $i == $ndups } {
- set i 1
- } else {
- incr i
- }
-
- # Now delete the key
- set ret [$dbc del]
- error_check_good db_del:$key $ret 0
- }
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\t$tname.c: verify empty file"
- # Double check that file is now empty
- set db [eval {berkdb_open} $largs $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
- set ret [$dbc get -first]
- error_check_good get_on_empty [string length $ret] 0
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-	error_check_good db_close [$db close] 0
-}
diff --git a/storage/bdb/test/test007.tcl b/storage/bdb/test/test007.tcl
deleted file mode 100644
index 6cb57495add..00000000000
--- a/storage/bdb/test/test007.tcl
+++ /dev/null
@@ -1,19 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test007.tcl,v 11.12 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test007
-# TEST Small keys/medium data
-# TEST Put/get per key
-# TEST Close, reopen
-# TEST Keyed delete
-# TEST
-# TEST	Check that delete operations work. Create a database; close
-# TEST	the database and reopen it. Then issue a delete by key for
-# TEST	each entry. (Test006 plus reopen)
-proc test007 { method {nentries 10000} {tnum "007"} {ndups 5} args} {
- eval {test006 $method $nentries 1 $tnum $ndups} $args
-}
diff --git a/storage/bdb/test/test008.tcl b/storage/bdb/test/test008.tcl
deleted file mode 100644
index d798d3fb22d..00000000000
--- a/storage/bdb/test/test008.tcl
+++ /dev/null
@@ -1,200 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test008.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test008
-# TEST Small keys/large data
-# TEST Put/get per key
-# TEST Loop through keys by steps (which change)
-# TEST ... delete each key at step
-# TEST ... add each key back
-# TEST ... change step
-# TEST Confirm that overflow pages are getting reused
-# TEST
-# TEST Take the source files and dbtest executable and enter their names as
-# TEST the key with their contents as data. After all are entered, begin
-# TEST	looping through the entries, deleting some pairs and then re-adding them.
-proc test008 { method {reopen "008"} {debug 0} args} {
- source ./include.tcl
-
- set tnum test$reopen
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 } {
- puts "Test$reopen skipping for method $method"
- return
- }
-
- puts -nonewline "$tnum: $method filename=key filecontents=data pairs"
- if {$reopen == "009"} {
- puts "(with close)"
- } else {
- puts ""
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/$tnum.db
- set env NULL
- } else {
- set testfile $tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
-
- cleanup $testdir $env
-
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
-
- # Here is the loop where we put and get each key/data pair
- set file_list [get_file_list]
-
- set count 0
- puts "\tTest$reopen.a: Initial put/get loop"
- foreach f $file_list {
- set names($count) $f
- set key $f
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- put_file $db $txn $pflags $f
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- get_file $db $txn $gflags $f $t4
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- error_check_good Test$reopen:diff($f,$t4) \
- [filecmp $f $t4] 0
-
- incr count
- }
-
- if {$reopen == "009"} {
- error_check_good db_close [$db close] 0
-
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- }
-
-	# Now we will step through the keys again (by increments) and
-	# delete all the entries, then re-insert them.
-
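-	# With step $i the loops below touch keys 0, $i, 2*$i, ...;
-	# for example, i=4 over ten files deletes and then re-adds
-	# entries 0, 4 and 8.  Reusing the overflow pages freed by the
-	# deletes is the behavior this test is meant to confirm.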
- puts "\tTest$reopen.b: Delete re-add loop"
- foreach i "1 2 4 8 16" {
- for {set ndx 0} {$ndx < $count} { incr ndx $i} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db del} $txn {$names($ndx)}]
- error_check_good db_del:$names($ndx) $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- for {set ndx 0} {$ndx < $count} { incr ndx $i} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- put_file $db $txn $pflags $names($ndx)
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- }
-
- if {$reopen == "009"} {
- error_check_good db_close [$db close] 0
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- }
-
- # Now, reopen the file and make sure the key/data pairs look right.
- puts "\tTest$reopen.c: Dump contents forward"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_bin_file $db $txn $t1 test008.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set oid [open $t2.tmp w]
- foreach f $file_list {
- puts $oid $f
- }
- close $oid
- filesort $t2.tmp $t2
- fileremove $t2.tmp
- filesort $t1 $t3
-
- error_check_good Test$reopen:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again in reverse direction.
- puts "\tTest$reopen.d: Dump contents backward"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- filesort $t1 $t3
-
- error_check_good Test$reopen:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
- error_check_good close:$db [$db close] 0
-}
-
-proc test008.check { binfile tmpfile } {
- global tnum
- source ./include.tcl
-
- error_check_good diff($binfile,$tmpfile) \
- [filecmp $binfile $tmpfile] 0
-}
diff --git a/storage/bdb/test/test009.tcl b/storage/bdb/test/test009.tcl
deleted file mode 100644
index 258b01277ec..00000000000
--- a/storage/bdb/test/test009.tcl
+++ /dev/null
@@ -1,18 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test009.tcl,v 11.11 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test009
-# TEST Small keys/large data
-# TEST Same as test008; close and reopen database
-# TEST
-# TEST Check that we reuse overflow pages. Create database with lots of
-# TEST big key/data pairs. Go through and delete and add keys back
-# TEST randomly. Then close the DB and make sure that we have everything
-# TEST we think we should.
-proc test009 { method args} {
- eval {test008 $method "009" 0} $args
-}
diff --git a/storage/bdb/test/test010.tcl b/storage/bdb/test/test010.tcl
deleted file mode 100644
index bbf6fba679f..00000000000
--- a/storage/bdb/test/test010.tcl
+++ /dev/null
@@ -1,176 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test010.tcl,v 11.23 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test010
-# TEST Duplicate test
-# TEST Small key/data pairs.
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and data; add duplicate records for each.
-# TEST After all are entered, retrieve all; verify output.
-# TEST Close file, reopen, do retrieve and re-verify.
-# TEST This does not work for recno
-proc test010 { method {nentries 10000} {ndups 5} {tnum "010"} args } {
- source ./include.tcl
-
- set omethod $method
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
- puts "Test$tnum: $method ($args) $nentries \
- small $ndups dup key/data pairs"
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
-
- cleanup $testdir $env
-
- set db [eval {berkdb_open \
- -create -mode 0644 -dup} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
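-	# Each duplicate is stored as "<dup#>:<word>", e.g. "3:apple";
-	# the id_of and data_of helpers used below split that string
-	# back into the duplicate number and the original word.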
- while { [gets $did str] != -1 && $count < $nentries } {
- for { set i 1 } { $i <= $ndups } { incr i } {
- set datastr $i:$str
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Now retrieve all the keys matching this key
- set x 1
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- for {set ret [$dbc get "-set" $str]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get "-next"] } {
- if {[llength $ret] == 0} {
- break
- }
- set k [lindex [lindex $ret 0] 0]
- if { [string compare $k $str] != 0 } {
- break
- }
- set datastr [lindex [lindex $ret 0] 1]
- set d [data_of $datastr]
- error_check_good "Test$tnum:get" $d $str
- set id [ id_of $datastr ]
- error_check_good "Test$tnum:dup#" $id $x
- incr x
- }
- error_check_good "Test$tnum:ndups:$str" [expr $x - 1] $ndups
- error_check_good cursor_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.a: Checking file for correct duplicates"
- set dlist ""
- for { set i 1 } { $i <= $ndups } {incr i} {
- lappend dlist $i
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Now compare the keys to see if they match the dictionary entries
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
-
- error_check_good Test$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- error_check_good db_close [$db close] 0
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\tTest$tnum.b: Checking file for correct duplicates after close"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Now compare the keys to see if they match the dictionary entries
- filesort $t1 $t3
- error_check_good Test$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- error_check_good db_close [$db close] 0
-}
diff --git a/storage/bdb/test/test011.tcl b/storage/bdb/test/test011.tcl
deleted file mode 100644
index ad8439011c8..00000000000
--- a/storage/bdb/test/test011.tcl
+++ /dev/null
@@ -1,470 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test011.tcl,v 11.30 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test011
-# TEST Duplicate test
-# TEST Small key/data pairs.
-# TEST Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
-# TEST To test off-page duplicates, run with small pagesize.
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and data; add duplicate records for each.
-# TEST Then do some key_first/key_last add_before, add_after operations.
-# TEST This does not work for recno
-# TEST
-# TEST To test if dups work when they fall off the main page, run this with
-# TEST a very tiny page size.
-proc test011 { method {nentries 10000} {ndups 5} {tnum "011"} args } {
- global dlist
- global rand_init
- source ./include.tcl
-
- set dlist ""
-
- if { [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
- if { [is_record_based $method] == 1 } {
- test011_recno $method $nentries $tnum $args
- return
- }
- if {$ndups < 5} {
- set ndups 5
- }
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- berkdb srand $rand_init
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
-
- puts -nonewline "Test$tnum: $method $nentries small $ndups dup "
- puts "key/data pairs, cursor ops"
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- set db [eval {berkdb_open -create \
- -mode 0644} [concat $args "-dup"] {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
- # We will add dups with values 1, 3, ... $ndups. Then we'll add
- # 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using
- # add before and add after.
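-	# For the default ndups of 5, the dup list for each key grows
-	# like this as the steps below run:
-	#
-	#   after .a (put loop):  1 3 5
-	#   after -keyfirst:      0 1 3 5
-	#   after -keylast:       0 1 3 5 6
-	#   after -before:        0 1 2 3 5 6
-	#   after -after:         0 1 2 3 4 5 6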
- puts "\tTest$tnum.a: put and get duplicate keys."
- set i ""
- for { set i 1 } { $i <= $ndups } { incr i 2 } {
- lappend dlist $i
- }
- set maxodd $i
- while { [gets $did str] != -1 && $count < $nentries } {
- for { set i 1 } { $i <= $ndups } { incr i 2 } {
- set datastr $i:$str
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn $pflags {$str $datastr}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Now retrieve all the keys matching this key
- set x 1
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- for {set ret [$dbc get "-set" $str ]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get "-next"] } {
- if {[llength $ret] == 0} {
- break
- }
- set k [lindex [lindex $ret 0] 0]
- if { [string compare $k $str] != 0 } {
- break
- }
- set datastr [lindex [lindex $ret 0] 1]
- set d [data_of $datastr]
-
- error_check_good Test$tnum:put $d $str
- set id [ id_of $datastr ]
- error_check_good Test$tnum:dup# $id $x
- incr x 2
- }
- error_check_good Test$tnum:numdups $x $maxodd
- error_check_good curs_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.b: \
- traverse entire file checking duplicates before close."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Now compare the keys to see if they match the dictionary entries
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
-
- error_check_good Test$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- error_check_good db_close [$db close] 0
-
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\tTest$tnum.c: \
- traverse entire file checking duplicates after close."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Now compare the keys to see if they match the dictionary entries
- filesort $t1 $t3
- error_check_good Test$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tTest$tnum.d: Testing key_first functionality"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- add_dup $db $txn $nentries "-keyfirst" 0 0
- set dlist [linsert $dlist 0 0]
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- puts "\tTest$tnum.e: Testing key_last functionality"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0
- lappend dlist [expr $maxodd - 1]
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- puts "\tTest$tnum.f: Testing add_before functionality"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- add_dup $db $txn $nentries "-before" 2 3
- set dlist [linsert $dlist 2 2]
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- puts "\tTest$tnum.g: Testing add_after functionality"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- add_dup $db $txn $nentries "-after" 4 4
- set dlist [linsert $dlist 4 4]
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- error_check_good db_close [$db close] 0
-}
-
-proc add_dup {db txn nentries flag dataval iter} {
- source ./include.tcl
-
- set dbc [eval {$db cursor} $txn]
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- set datastr $dataval:$str
- set ret [$dbc get "-set" $str]
- error_check_bad "cget(SET)" [is_substr $ret Error] 1
- for { set i 1 } { $i < $iter } { incr i } {
- set ret [$dbc get "-next"]
- error_check_bad "cget(NEXT)" [is_substr $ret Error] 1
- }
-
- if { [string compare $flag "-before"] == 0 ||
- [string compare $flag "-after"] == 0 } {
- set ret [$dbc put $flag $datastr]
- } else {
- set ret [$dbc put $flag $str $datastr]
- }
- error_check_good "$dbc put $flag" $ret 0
- incr count
- }
- close $did
- $dbc close
-}
-
-proc test011_recno { method {nentries 10000} {tnum "011"} largs } {
- global dlist
- source ./include.tcl
-
- set largs [convert_args $method $largs]
- set omethod [convert_method $method]
- set renum [is_rrecno $method]
-
- puts "Test$tnum: \
- $method ($largs) $nentries test cursor insert functionality"
-
- # Create the database and open the dictionary
- set eindex [lsearch -exact $largs "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- set txnenv 0
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $largs $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append largs " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- if {$renum == 1} {
- append largs " -renumber"
- }
- set db [eval {berkdb_open \
- -create -mode 0644} $largs {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # The basic structure of the test is that we pick a random key
-	# in the database and then add items at, before, and after it. The
- # trickiness is that with RECNO, these are not duplicates, they
- # are creating new keys. Therefore, every time we do this, the
- # keys assigned to other values change. For this reason, we'll
- # keep the database in tcl as a list and insert properly into
- # it to verify that the right thing is happening. If we do not
- # have renumber set, then the BEFORE and AFTER calls should fail.
-
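-	# A sketch of the renumbering (assuming -renumber is set): if
-	# the database holds records 1..4 and we cursor-put -before
-	# record 2, the old records 2..4 become 3..5 and the new data
-	# becomes record 2.  The tcl list dlist below is kept in step
-	# with linsert so the two can be compared record by record.
-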
- # Seed the database with an initial record
- gets $did str
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {1 [chop_data $method $str]}]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good put $ret 0
- set count 1
-
- set dlist "NULL $str"
-
- # Open a cursor
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- puts "\tTest$tnum.a: put and get entries"
- while { [gets $did str] != -1 && $count < $nentries } {
- # Pick a random key
- set key [berkdb random_int 1 $count]
- set ret [$dbc get -set $key]
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_good cget:SET:key $k $key
- error_check_good \
- cget:SET $d [pad_data $method [lindex $dlist $key]]
-
- # Current
- set ret [$dbc put -current [chop_data $method $str]]
- error_check_good cput:$key $ret 0
- set dlist [lreplace $dlist $key $key [pad_data $method $str]]
-
- # Before
- if { [gets $did str] == -1 } {
-			continue
- }
-
- if { $renum == 1 } {
- set ret [$dbc put \
- -before [chop_data $method $str]]
- error_check_good cput:$key:BEFORE $ret $key
- set dlist [linsert $dlist $key $str]
- incr count
-
- # After
- if { [gets $did str] == -1 } {
-				continue
- }
- set ret [$dbc put \
- -after [chop_data $method $str]]
- error_check_good cput:$key:AFTER $ret [expr $key + 1]
- set dlist [linsert $dlist [expr $key + 1] $str]
- incr count
- }
-
- # Now verify that the keys are in the right place
- set i 0
- for {set ret [$dbc get "-set" $key]} \
- {[string length $ret] != 0 && $i < 3} \
- {set ret [$dbc get "-next"] } {
- set check_key [expr $key + $i]
-
- set k [lindex [lindex $ret 0] 0]
- error_check_good cget:$key:loop $k $check_key
-
- set d [lindex [lindex $ret 0] 1]
- error_check_good cget:data $d \
- [pad_data $method [lindex $dlist $check_key]]
- incr i
- }
- }
- close $did
- error_check_good cclose [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Create check key file.
- set oid [open $t2 w]
- for {set i 1} {$i <= $count} {incr i} {
- puts $oid $i
- }
- close $oid
-
- puts "\tTest$tnum.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 test011_check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good Test$tnum:diff($t2,$t1) \
- [filecmp $t2 $t1] 0
-
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum.c: close, open, and dump file"
- open_and_dump_file $testfile $env $t1 test011_check \
- dump_file_direction "-first" "-next"
- error_check_good Test$tnum:diff($t2,$t1) \
- [filecmp $t2 $t1] 0
-
- puts "\tTest$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 test011_check \
- dump_file_direction "-last" "-prev"
-
- filesort $t1 $t3 -n
- error_check_good Test$tnum:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-}
-
-proc test011_check { key data } {
- global dlist
-
- error_check_good "get key $key" $data [lindex $dlist $key]
-}
diff --git a/storage/bdb/test/test012.tcl b/storage/bdb/test/test012.tcl
deleted file mode 100644
index 42225d982cd..00000000000
--- a/storage/bdb/test/test012.tcl
+++ /dev/null
@@ -1,139 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test012.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test012
-# TEST Large keys/small data
-# TEST Same as test003 except use big keys (source files and
-# TEST executables) and small data (the file/executable names).
-# TEST
-# TEST Take the source files and dbtest executable and enter their contents
-# TEST as the key with their names as data. After all are entered, retrieve
-# TEST all; compare output to original. Close file, reopen, do retrieve and
-# TEST re-verify.
-proc test012 { method args} {
- global names
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 } {
- puts "Test012 skipping for method $method"
- return
- }
-
- puts "Test012: $method ($args) filename=data filecontents=key pairs"
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test012.db
- set env NULL
- } else {
- set testfile test012.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
-
- cleanup $testdir $env
-
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
-
- # Here is the loop where we put and get each key/data pair
- set file_list [get_file_list]
-
- puts "\tTest012.a: put/get loop"
- set count 0
- foreach f $file_list {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- put_file_as_key $db $txn $pflags $f
-
- set kd [get_file_as_key $db $txn $gflags $f]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest012.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_binkey_file $db $txn $t1 test012.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the data to see if they match the .o and dbtest files
- set oid [open $t2.tmp w]
- foreach f $file_list {
- puts $oid $f
- }
- close $oid
- filesort $t2.tmp $t2
- fileremove $t2.tmp
- filesort $t1 $t3
-
- error_check_good Test012:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again.
- puts "\tTest012.c: close, open, and dump file"
- open_and_dump_file $testfile $env $t1 test012.check \
- dump_binkey_file_direction "-first" "-next"
-
- filesort $t1 $t3
-
- error_check_good Test012:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again in reverse direction.
- puts "\tTest012.d: close, open, and dump file in reverse direction"
-	open_and_dump_file $testfile $env $t1 test012.check \
- dump_binkey_file_direction "-last" "-prev"
-
- filesort $t1 $t3
-
- error_check_good Test012:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-}
-
-# Check function for test012; key should be file name; data should be contents
-proc test012.check { binfile tmpfile } {
- source ./include.tcl
-
- error_check_good Test012:diff($binfile,$tmpfile) \
- [filecmp $binfile $tmpfile] 0
-}
diff --git a/storage/bdb/test/test013.tcl b/storage/bdb/test/test013.tcl
deleted file mode 100644
index e456965bfd9..00000000000
--- a/storage/bdb/test/test013.tcl
+++ /dev/null
@@ -1,240 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test013.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test013
-# TEST Partial put test
-# TEST Overwrite entire records using partial puts.
-# TEST	Make sure that the NOOVERWRITE flag works.
-# TEST
-# TEST 1. Insert 10000 keys and retrieve them (equal key/data pairs).
-# TEST 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
-# TEST 3. Actually overwrite each one with its datum reversed.
-# TEST
-# TEST No partial testing here.
-proc test013 { method {nentries 10000} args } {
- global errorCode
- global errorInfo
- global fixed_len
-
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test013.db
- set env NULL
- } else {
- set testfile test013.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- set checkfunc test013_recno.check
- append gflags " -recno"
- global kvals
- } else {
- set checkfunc test013.check
- }
- puts "\tTest013.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $gflags $txn {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
-
- # Now we will try to overwrite each datum, but set the
- # NOOVERWRITE flag.
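-	# With -nooverwrite, a put on an existing key is expected to
-	# fail and leave the record untouched, e.g.:
-	#
-	#   $db put -nooverwrite $key $data   ;# returns ...DB_KEYEXIST...
-	#
-	# which is what the is_substr check below looks for.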
- puts "\tTest013.b: overwrite values with NOOVERWRITE flag."
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn $pflags \
- {-nooverwrite $key [chop_data $method $str]}]
- error_check_good put [is_substr $ret "DB_KEYEXIST"] 1
-
- # Value should be unchanged.
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
-
- # Now we will replace each item with its datum capitalized.
- puts "\tTest013.c: overwrite values with capitalized datum"
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- set rstr [string toupper $str]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} \
- $txn $pflags {$key [chop_data $method $rstr]}]
- error_check_good put $r 0
-
- # Value should be changed.
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $rstr]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
-
- # Now make sure that everything looks OK
- puts "\tTest013.d: check entire file contents"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid $i
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good \
- Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
-
- puts "\tTest013.e: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $t1 $checkfunc \
- dump_file_direction "-first" "-next"
-
- if { [is_record_based $method] == 0 } {
- filesort $t1 $t3
- }
-
- error_check_good \
- Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tTest013.f: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 $checkfunc \
- dump_file_direction "-last" "-prev"
-
- if { [is_record_based $method] == 0 } {
- filesort $t1 $t3
- }
-
- error_check_good \
- Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
-}
-
-# Check function for test013; keys and data are identical
-proc test013.check { key data } {
- error_check_good \
- "key/data mismatch for $key" $data [string toupper $key]
-}
-
-proc test013_recno.check { key data } {
- global dict
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good \
- "data mismatch for $key" $data [string toupper $kvals($key)]
-}
diff --git a/storage/bdb/test/test014.tcl b/storage/bdb/test/test014.tcl
deleted file mode 100644
index 708d5dc09e5..00000000000
--- a/storage/bdb/test/test014.tcl
+++ /dev/null
@@ -1,253 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test014.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test014
-# TEST Exercise partial puts on short data
-# TEST Run 5 combinations of numbers of characters to replace,
-# TEST and number of times to increase the size by.
-# TEST
-# TEST Partial put test, small data, replacing with same size. The data set
-# TEST consists of the first nentries of the dictionary. We will insert them
-# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then
-# TEST we'll try to perform partial puts of some characters at the beginning,
-# TEST some at the end, and some at the middle.
-proc test014 { method {nentries 10000} args } {
- set fixed 0
- set args [convert_args $method $args]
-
- if { [is_fixed_length $method] == 1 } {
- set fixed 1
- }
-
- puts "Test014: $method ($args) $nentries equal key/data pairs, put test"
-
- # flagp indicates whether this is a postpend or a
- # normal partial put
- set flagp 0
-
- eval {test014_body $method $flagp 1 1 $nentries} $args
- eval {test014_body $method $flagp 1 4 $nentries} $args
- eval {test014_body $method $flagp 2 4 $nentries} $args
- eval {test014_body $method $flagp 1 128 $nentries} $args
- eval {test014_body $method $flagp 2 16 $nentries} $args
- if { $fixed == 0 } {
- eval {test014_body $method $flagp 0 1 $nentries} $args
- eval {test014_body $method $flagp 0 4 $nentries} $args
- eval {test014_body $method $flagp 0 128 $nentries} $args
-
-		# POST-PENDS:
-		# partial put of data after the end of the existing record
-		# chars: number of empty bytes that will be padded with nulls
-		# increase: the length of the string to be appended (after the pad)
- #
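-		# Sketch of the resulting record layout for one key:
-		#
-		#   [ original str ][ chars x \0 ][ increase x "P" ]
-		#
-		# matching the "binary format a...x...a..." template used
-		# in test014_body to build the expected value dvals($key).
-		#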
- set flagp 1
- eval {test014_body $method $flagp 1 1 $nentries} $args
- eval {test014_body $method $flagp 4 1 $nentries} $args
- eval {test014_body $method $flagp 128 1 $nentries} $args
- eval {test014_body $method $flagp 1 4 $nentries} $args
- eval {test014_body $method $flagp 1 128 $nentries} $args
- }
- puts "Test014 complete."
-}
-
-proc test014_body { method flagp chars increase {nentries 10000} args } {
- source ./include.tcl
-
- set omethod [convert_method $method]
-
- if { [is_fixed_length $method] == 1 && $chars != $increase } {
- puts "Test014: $method: skipping replace\
- $chars chars with string $increase times larger."
- return
- }
-
- if { $flagp == 1} {
- puts "Test014: Postpending string of len $increase with \
- gap $chars."
- } else {
- puts "Test014: Replace $chars chars with string \
- $increase times larger"
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test014.db
- set env NULL
- } else {
- set testfile test014.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set gflags ""
- set pflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
-
- puts "\tTest014.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- # We will do the initial put and then three Partial Puts
- # for the beginning, middle and end of the string.
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- if { $flagp == 1 } {
- # this is for postpend only
- global dvals
-
- # initial put
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key $str}]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good dbput $ret 0
-
- set offset [string length $str]
-
- # increase is the actual number of new bytes
- # to be postpended (besides the null padding)
- set data [repeat "P" $increase]
-
- # chars is the amount of padding in between
- # the old data and the new
- set len [expr $offset + $chars + $increase]
- set dvals($key) [binary format \
- a[set offset]x[set chars]a[set increase] \
- $str $data]
- set offset [expr $offset + $chars]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put -partial [list $offset 0]} \
- $txn {$key $data}]
- error_check_good dbput:post $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- } else {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- partial_put $method $db $txn \
- $gflags $key $str $chars $increase
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- incr count
- }
- close $did
-
- # Now make sure that everything looks OK
- puts "\tTest014.b: check entire file contents"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 test014.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
-		for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid $i
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good \
- Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
-
- puts "\tTest014.c: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env \
- $t1 test014.check dump_file_direction "-first" "-next"
-
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t2 $t3
- file rename -force $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good \
- Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tTest014.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 \
- test014.check dump_file_direction "-last" "-prev"
-
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t2 $t3
- file rename -force $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good \
- Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
-}
-
-# Check function for test014; keys and data are identical
-proc test014.check { key data } {
- global dvals
-
- error_check_good key"$key"_exists [info exists dvals($key)] 1
- error_check_good "data mismatch for key $key" $data $dvals($key)
-}
diff --git a/storage/bdb/test/test015.tcl b/storage/bdb/test/test015.tcl
deleted file mode 100644
index 9401228116f..00000000000
--- a/storage/bdb/test/test015.tcl
+++ /dev/null
@@ -1,276 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test015.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test015
-# TEST Partial put test
-# TEST Partial put test where the key does not initially exist.
-proc test015 { method {nentries 7500} { start 0 } args } {
- global fixed_len testdir
-
- set low_range 50
- set mid_range 100
- set high_range 1000
-
- if { [is_fixed_length $method] } {
- set low_range [expr $fixed_len/2 - 2]
- set mid_range [expr $fixed_len/2]
- set high_range $fixed_len
- }
-
- set t_table {
- { 1 { 1 1 1 } }
- { 2 { 1 1 5 } }
- { 3 { 1 1 $low_range } }
- { 4 { 1 $mid_range 1 } }
- { 5 { $mid_range $high_range 5 } }
- { 6 { 1 $mid_range $low_range } }
- }
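-
-	# Each t_table entry is { test# { off_low off_hi rcount } }:
-	# rcount copies of the dictionary word are written at a random
-	# offset chosen between off_low and off_hi (see test015_body).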
-
- puts "Test015: \
- $method ($args) $nentries equal key/data pairs, partial put test"
- test015_init
- if { $start == 0 } {
- set start { 1 2 3 4 5 6 }
- }
- foreach entry $t_table {
- set this [lindex $entry 0]
- if { [lsearch $start $this] == -1 } {
- continue
- }
- puts -nonewline "$this: "
- eval [concat test015_body $method [lindex $entry 1] \
- $nentries $args]
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- set testdir [get_home $env]
- }
-puts "Verifying testdir $testdir"
-
- error_check_good verify [verify_dir $testdir "\tTest015.e: "] 0
- }
-}
-
-proc test015_init { } {
- global rand_init
-
- berkdb srand $rand_init
-}
-
-proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
- global dvals
- global fixed_len
- global testdir
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- set checkfunc test015.check
-
- if { [is_fixed_length $method] && \
- [string compare $omethod "-recno"] == 0} {
- # is fixed recno method
- set checkfunc test015.check
- }
-
- puts "Put $rcount strings random offsets between $off_low and $off_hi"
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test015.db
- set env NULL
- } else {
- set testfile test015.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries > 5000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- set retdir $testdir
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- puts "\tTest015.a: put/get loop for $nentries entries"
-
- # Here is the loop where we put and get each key/data pair
- # Each put is a partial put of a record that does not exist.
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- if { [string length $str] > $fixed_len } {
- continue
- }
- set key [expr $count + 1]
- } else {
- set key $str
- }
-
- if { 0 } {
- set data [replicate $str $rcount]
- set off [ berkdb random_int $off_low $off_hi ]
- set offn [expr $off + 1]
- if { [is_fixed_length $method] && \
- [expr [string length $data] + $off] >= $fixed_len} {
- set data [string range $data 0 [expr $fixed_len-$offn]]
- }
- set dvals($key) [partial_shift $data $off right]
- } else {
- set data [chop_data $method [replicate $str $rcount]]
-
- # This is a hack. In DB we will store the records with
- # some padding, but these will get lost if we just return
- # them in TCL. As a result, we're going to have to hack
- # get to check for 0 padding and return a list consisting
- # of the number of 0's and the actual data.
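-			# For example, with off=3 and data "abc" the template
-			# below becomes [binary format x3a3 $data] and the
-			# stored record is \0\0\0abc; test015.check compares
-			# records bytewise with binary scan for this reason.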
- set off [ berkdb random_int $off_low $off_hi ]
-
- # There is no string concatenation function in Tcl
- # (although there is one in TclX), so we have to resort
- # to this hack. Ugh.
- set slen [string length $data]
- if {[is_fixed_length $method] && \
- $slen > $fixed_len - $off} {
-				set slen [expr $fixed_len - $off]
- }
- set a "a"
- set dvals($key) [pad_data \
- $method [eval "binary format x$off$a$slen" {$data}]]
- }
- if {[is_fixed_length $method] && \
- [string length $data] > ($fixed_len - $off)} {
- set slen [expr $fixed_len - $off]
- set data [eval "binary format a$slen" {$data}]
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn \
- {-partial [list $off [string length $data]] $key $data}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- incr count
- }
- close $did
-
- # Now make sure that everything looks OK
- puts "\tTest015.b: check entire file contents"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid $i
- }
- close $oid
- filesort $t2 $t3
- file rename -force $t3 $t2
- filesort $t1 $t3
- } else {
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good Test015:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tTest015.c: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $t1 \
- $checkfunc dump_file_direction "-first" "-next"
-
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t1 $t3
- }
-
- error_check_good Test015:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tTest015.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 \
- $checkfunc dump_file_direction "-last" "-prev"
-
- if { [string compare $omethod "-recno"] != 0 } {
- filesort $t1 $t3
- }
-
- error_check_good Test015:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- unset dvals
-}
-
-# Check function for test015; data must match the value saved in dvals
-proc test015.check { key data } {
- global dvals
-
- error_check_good key"$key"_exists [info exists dvals($key)] 1
- binary scan $data "c[string length $data]" a
- binary scan $dvals($key) "c[string length $dvals($key)]" b
- error_check_good "mismatch on padding for key $key" $a $b
-}
-
-proc test015.fixed.check { key data } {
- global dvals
- global fixed_len
-
- error_check_good key"$key"_exists [info exists dvals($key)] 1
- if { [string length $data] > $fixed_len } {
- error_check_bad \
- "data length:[string length $data] \
- for fixed:$fixed_len" 1 1
- }
- puts "$data : $dvals($key)"
- error_check_good compare_data($data,$dvals($key)) \
- $dvals($key) $data
-}
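The expected values above are built with Tcl's binary format; because the format string itself is assembled from variables, test015 stores the literal "a" in a variable and splices it between the offset and the length. A minimal stand-alone sketch of the same padding trick, using stock Tcl only and made-up sample values:

    # A partial put at offset $off into a record that does not yet
    # exist yields $off NUL bytes followed by the data itself.
    set data "abcde"
    set off 3
    set slen [string length $data]
    # Format "x3a5": emit 3 zero bytes, then a 5-byte string field.
    set expected [binary format x${off}a$slen $data]
    puts [string length $expected]    ;# 8 = 3 pad bytes + 5 data bytes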
diff --git a/storage/bdb/test/test016.tcl b/storage/bdb/test/test016.tcl
deleted file mode 100644
index 481c85ec766..00000000000
--- a/storage/bdb/test/test016.tcl
+++ /dev/null
@@ -1,207 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test016.tcl,v 11.25 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test016
-# TEST Partial put test
-# TEST Partial put where the datum gets shorter as a result of the put.
-# TEST
-# TEST Partial put test where partial puts make the record smaller.
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and a fixed, medium length data string;
-# TEST retrieve each. After all are entered, go back and do partial puts,
-# TEST replacing a random-length string with the key value.
-# TEST Then verify.
-
-proc test016 { method {nentries 10000} args } {
- global datastr
- global dvals
- global rand_init
- source ./include.tcl
-
- berkdb srand $rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_fixed_length $method] == 1 } {
- puts "Test016: skipping for method $method"
- return
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test016.db
- set env NULL
- } else {
- set testfile test016.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- puts "Test016: $method ($args) $nentries partial put shorten"
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
-
- # Here is the loop where we put and get each key/data pair
- puts "\tTest016.a: put/get loop"
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $datastr]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
-
- # Next we will do a partial put replacement, making the data
- # shorter
- puts "\tTest016.b: partial put loop"
- set did [open $dict]
- set count 0
- set len [string length $datastr]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
-
- set repl_len [berkdb random_int [string length $key] $len]
- set repl_off [berkdb random_int 0 [expr $len - $repl_len] ]
- set s1 [string range $datastr 0 [ expr $repl_off - 1] ]
- set s2 [string toupper $key]
- set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
- set dvals($key) [pad_data $method $s1$s2$s3]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {-partial \
- [list $repl_off $repl_len] $key [chop_data $method $s2]}]
- error_check_good put $ret 0
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $s1$s2$s3]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest016.c: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 test016.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid $i
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good Test016:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again.
- puts "\tTest016.d: close, open, and dump file"
- open_and_dump_file $testfile $env $t1 test016.check \
- dump_file_direction "-first" "-next"
-
- if { [ is_record_based $method ] == 0 } {
- filesort $t1 $t3
- }
- error_check_good Test016:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again in reverse direction.
- puts "\tTest016.e: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 test016.check \
- dump_file_direction "-last" "-prev"
-
- if { [ is_record_based $method ] == 0 } {
- filesort $t1 $t3
- }
- error_check_good Test016:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-}
-
-# Check function for test016; data should be whatever is set in dvals
-proc test016.check { key data } {
- global datastr
- global dvals
-
- error_check_good key"$key"_exists [info exists dvals($key)] 1
- error_check_good "data mismatch for key $key" $data $dvals($key)
-}
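The shrinking partial put that test016 exercises is easy to see in isolation. A sketch only, assuming the BDB Tcl bindings used throughout this suite are already loaded; the file and key names are illustrative:

    set db [berkdb_open -create -mode 0644 -btree shrink.db]
    $db put mykey "AAAAABBBBBCCCCC"              ;# 15-byte record
    # Replace the 5 bytes at offset 5 with 2 bytes: the record
    # shrinks from 15 bytes to 12.
    $db put -partial {5 5} mykey "xy"
    puts [lindex [lindex [$db get mykey] 0] 1]   ;# AAAAAxyCCCCC
    $db close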
diff --git a/storage/bdb/test/test017.tcl b/storage/bdb/test/test017.tcl
deleted file mode 100644
index 6503b2cc140..00000000000
--- a/storage/bdb/test/test017.tcl
+++ /dev/null
@@ -1,316 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test017.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test017
-# TEST Basic offpage duplicate test.
-# TEST
-# TEST Run duplicates with small page size so that we test off page duplicates.
-# TEST Then after we have an off-page database, test with overflow pages too.
-proc test017 { method {contents 0} {ndups 19} {tnum "017"} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- incr pgindex
- if { [lindex $args $pgindex] > 8192 } {
- puts "Test$tnum: Skipping for large pagesizes"
- return
- }
- }
-
- # Create the database and open the dictionary
- set limit 0
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- set limit 100
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
-
- cleanup $testdir $env
-
- set db [eval {berkdb_open \
- -create -mode 0644 -dup} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- set file_list [get_file_list 1]
- if { $txnenv == 1 } {
- if { [llength $file_list] > $limit } {
- set file_list [lrange $file_list 0 $limit]
- }
- set flen [llength $file_list]
- reduce_dups flen ndups
- }
- puts "Test$tnum: $method ($args) Off page duplicate tests\
- with $ndups duplicates"
-
- set ovfl ""
- # Here is the loop where we put and get each key/data pair
- puts -nonewline "\tTest$tnum.a: Creating duplicates with "
- if { $contents != 0 } {
- puts "file contents as key/data"
- } else {
- puts "file name as key/data"
- }
- foreach f $file_list {
- if { $contents != 0 } {
- set fid [open $f r]
- fconfigure $fid -translation binary
- #
- # Prepend file name to guarantee uniqueness
- set filecont [read $fid]
- set str $f:$filecont
- close $fid
- } else {
- set str $f
- }
- for { set i 1 } { $i <= $ndups } { incr i } {
- set datastr $i:$str
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- #
- # Save 10% of the files for the overflow test
- #
- if { $contents == 0 && [expr $count % 10] == 0 } {
- lappend ovfl $f
- }
- # Now retrieve all the keys matching this key
- set ret [$db get $str]
- error_check_bad $f:dbget_dups [llength $ret] 0
- error_check_good $f:dbget_dups1 [llength $ret] $ndups
- set x 1
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- for {set ret [$dbc get "-set" $str]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get "-next"] } {
- set k [lindex [lindex $ret 0] 0]
- if { [string compare $k $str] != 0 } {
- break
- }
- set datastr [lindex [lindex $ret 0] 1]
- set d [data_of $datastr]
- if {[string length $d] == 0} {
- break
- }
- error_check_good "Test$tnum:get" $d $str
- set id [ id_of $datastr ]
- error_check_good "Test$tnum:$f:dup#" $id $x
- incr x
- }
- error_check_good "Test$tnum:ndups:$str" [expr $x - 1] $ndups
- error_check_good cursor_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.b: Checking file for correct duplicates"
- set dlist ""
- for { set i 1 } { $i <= $ndups } {incr i} {
- lappend dlist $i
- }
- set oid [open $t2.tmp w]
- set o1id [open $t4.tmp w]
- foreach f $file_list {
- for {set i 1} {$i <= $ndups} {incr i} {
- puts $o1id $f
- }
- puts $oid $f
- }
- close $oid
- close $o1id
- filesort $t2.tmp $t2
- filesort $t4.tmp $t4
- fileremove $t2.tmp
- fileremove $t4.tmp
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- if {$contents == 0} {
- filesort $t1 $t3
-
- error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
-
- # Now compare the keys to see if they match the file names
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 test017.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- filesort $t1 $t3
-
- error_check_good Test$tnum:diff($t3,$t4) [filecmp $t3 $t4] 0
- }
-
- error_check_good db_close [$db close] 0
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\tTest$tnum.c: Checking file for correct duplicates after close"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- if {$contents == 0} {
- # Now compare the keys to see if they match the filenames
- filesort $t1 $t3
- error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum.d: Verify off page duplicates and overflow status"
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set stat [$db stat]
- if { [is_btree $method] } {
- error_check_bad stat:offpage \
- [is_substr $stat "{{Internal pages} 0}"] 1
- }
- if {$contents == 0} {
- # This check doesn't work in hash, since overflow
- # pages count extra pages in buckets as well as true
- # P_OVERFLOW pages.
- if { [is_hash $method] == 0 } {
- error_check_good overflow \
- [is_substr $stat "{{Overflow pages} 0}"] 1
- }
- } else {
- if { [is_hash $method] } {
- error_check_bad overflow \
- [is_substr $stat "{{Number of big pages} 0}"] 1
- } else {
- error_check_bad overflow \
- [is_substr $stat "{{Overflow pages} 0}"] 1
- }
- }
-
- #
- # If doing overflow test, do that now. Else we are done.
- # Add overflow pages by adding a large entry to a duplicate.
- #
- if { [llength $ovfl] == 0} {
- error_check_good db_close [$db close] 0
- return
- }
-
- puts "\tTest$tnum.e: Add overflow duplicate entries"
- set ovfldup [expr $ndups + 1]
- foreach f $ovfl {
- #
- # This is just like put_file, but prepends the dup number
- #
- set fid [open $f r]
- fconfigure $fid -translation binary
- set fdata [read $fid]
- close $fid
- set data $ovfldup:$fdata:$fdata:$fdata:$fdata
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn $pflags {$f $data}]
- error_check_good ovfl_put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- puts "\tTest$tnum.f: Verify overflow duplicate entries"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_check $db $txn $t1 $dlist $ovfldup
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- filesort $t1 $t3
- error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
-
- set stat [$db stat]
- if { [is_hash [$db get_type]] } {
- error_check_bad overflow1_hash [is_substr $stat \
- "{{Number of big pages} 0}"] 1
- } else {
- error_check_bad \
- overflow1 [is_substr $stat "{{Overflow pages} 0}"] 1
- }
- error_check_good db_close [$db close] 0
-}
-
-# Check function; verify data contains key
-proc test017.check { key data } {
- error_check_good "data mismatch for key $key" $key [data_of $data]
-}
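The off-page condition test017 arranges can be reproduced by giving one key more duplicate data than fits on a leaf page. A sketch under the same assumptions (bindings loaded, names illustrative); whether the duplicates actually go off-page depends on page-fill details, which is why the test itself checks the {Internal pages} entry of [$db stat]:

    set db [berkdb_open -create -mode 0644 -pagesize 512 -dup -btree dup.db]
    # 19 duplicates of ~120 bytes each cannot all fit on one
    # 512-byte leaf page, so they move to an off-page dup tree.
    for {set i 1} {$i <= 19} {incr i} {
        $db put samekey [format "%02d:%s" $i [string repeat abc 40]]
    }
    puts [$db stat]
    $db close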
diff --git a/storage/bdb/test/test018.tcl b/storage/bdb/test/test018.tcl
deleted file mode 100644
index bf2e3eb562e..00000000000
--- a/storage/bdb/test/test018.tcl
+++ /dev/null
@@ -1,21 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test018.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test018
-# TEST Offpage duplicate test
-# TEST Key_{first,last,before,after} offpage duplicates.
-# TEST Run duplicates with small page size so that we test off page
-# TEST duplicates.
-proc test018 { method {nentries 10000} args} {
- puts "Test018: Off page duplicate tests"
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test018: Skipping for specific pagesizes"
- return
- }
- eval {test011 $method $nentries 19 "018" -pagesize 512} $args
-}
diff --git a/storage/bdb/test/test019.tcl b/storage/bdb/test/test019.tcl
deleted file mode 100644
index 68f6487be4f..00000000000
--- a/storage/bdb/test/test019.tcl
+++ /dev/null
@@ -1,136 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test019.tcl,v 11.24 2004/04/22 18:57:32 sue Exp $
-#
-# TEST test019
-# TEST Partial get test.
-proc test019 { method {nentries 10000} args } {
- global fixed_len
- global rand_init
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test019.db
- set env NULL
- } else {
- set testfile test019.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- puts "Test019: $method ($args) $nentries partial get test"
-
- cleanup $testdir $env
-
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
- berkdb srand $rand_init
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
-
- puts "\tTest019.a: put/get loop"
- for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
- { incr i } {
-
- if { [is_record_based $method] == 1 } {
- set key [expr $i + 1]
- } else {
- set key $str
- }
- set repl [berkdb random_int $fixed_len 100]
- set data [chop_data $method [replicate $str $repl]]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {-nooverwrite $key $data}]
- error_check_good dbput:$key $ret 0
-
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- dbget:$key $ret [list [list $key [pad_data $method $data]]]
- set kvals($key) $repl
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- close $did
-
- puts "\tTest019.b: partial get loop"
- set did [open $dict]
- for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
- { incr i } {
- if { [is_record_based $method] == 1 } {
- set key [expr $i + 1]
- } else {
- set key $str
- }
- set data [pad_data $method [replicate $str $kvals($key)]]
-
- set maxndx [expr [string length $data] - 1]
-
- if { $maxndx > 0 } {
- set beg [berkdb random_int 0 [expr $maxndx - 1]]
- set len [berkdb random_int 0 [expr $maxndx * 2]]
- } else {
- set beg 0
- set len 0
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db get} \
- $txn {-partial [list $beg $len]} $gflags {$key}]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Verify that the returned key matches, and that the returned
- # data is exactly the requested slice of the original record:
- # $len bytes starting at offset $beg.
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_good dbget_key $k $key
-
- error_check_good dbget_data $d \
- [string range $data $beg [expr $beg + $len - 1]]
-
- }
- error_check_good db_close [$db close] 0
- close $did
-}
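A partial get returns a slice of the stored data rather than the whole record, which is all test019 is about. A minimal sketch, same assumptions as above:

    set db [berkdb_open -create -mode 0644 -btree pget.db]
    $db put k "0123456789"
    # -partial {beg len} returns len bytes starting at offset beg.
    set ret [$db get -partial {3 4} k]
    puts [lindex [lindex $ret 0] 1]    ;# 3456
    $db close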
diff --git a/storage/bdb/test/test020.tcl b/storage/bdb/test/test020.tcl
deleted file mode 100644
index 19eda9c313c..00000000000
--- a/storage/bdb/test/test020.tcl
+++ /dev/null
@@ -1,137 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test020.tcl,v 11.19 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test020
-# TEST In-Memory database tests.
-proc test020 { method {nentries 10000} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- if { [is_queueext $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test020 skipping for method $method"
- return
- }
- # Create the database and open the dictionary
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # Check if we are using an env.
- if { $eindex == -1 } {
- set env NULL
- } else {
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- puts "Test020: $method ($args) $nentries equal key/data pairs"
-
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- set checkfunc test020_recno.check
- append gflags " -recno"
- } else {
- set checkfunc test020.check
- }
- puts "\tTest020.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest020.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid $i
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good Test020:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-}
-
-# Check function for test020; keys and data are identical
-proc test020.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
-
-proc test020_recno.check { key data } {
- global dict
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "data mismatch: key $key" $data $kvals($key)
-}
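Note that test020 passes no file name to berkdb_open: that is what makes the database in-memory. A sketch, assuming the bindings are loaded:

    # No file argument: the database is anonymous and purely in-memory.
    set db [berkdb_open -create -mode 0644 -btree]
    $db put hello world
    puts [$db get hello]    ;# {hello world}
    $db close               ;# contents are discarded on close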
diff --git a/storage/bdb/test/test021.tcl b/storage/bdb/test/test021.tcl
deleted file mode 100644
index 43a7a4bde14..00000000000
--- a/storage/bdb/test/test021.tcl
+++ /dev/null
@@ -1,162 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test021.tcl,v 11.17 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test021
-# TEST Btree range tests.
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self, reversed as key and self as data.
-# TEST After all are entered, retrieve each using a cursor SET_RANGE, and
-# TEST getting about 20 keys sequentially after it (in some cases we'll
-# TEST run out towards the end of the file).
-proc test021 { method {nentries 10000} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test021.db
- set env NULL
- } else {
- set testfile test021.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- puts "Test021: $method ($args) $nentries equal key/data pairs"
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- set checkfunc test021_recno.check
- append gflags " -recno"
- } else {
- set checkfunc test021.check
- }
- puts "\tTest021.a: put loop"
- # Here is the loop where we put each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key [reverse $str]
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good db_put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and retrieve about 20
- # records after it.
- error_check_good db_close [$db close] 0
-
- puts "\tTest021.b: test ranges"
- set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Open a cursor
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
- set did [open $dict]
- set i 0
- while { [gets $did str] != -1 && $i < $count } {
- if { [is_record_based $method] == 1 } {
- set key [expr $i + 1]
- } else {
- set key [reverse $str]
- }
-
- set r [$dbc get -set_range $key]
- error_check_bad dbc_get:$key [string length $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- $checkfunc $k $d
-
- for { set nrecs 0 } { $nrecs < 20 } { incr nrecs } {
- set r [$dbc get "-next"]
- # no error checking because we may run off the end
- # of the database
- if { [llength $r] == 0 } {
- continue
- }
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- $checkfunc $k $d
- }
- incr i
- }
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
- close $did
-}
-
-# Check function for test021; keys and data are reversed
-proc test021.check { key data } {
- error_check_good "key/data mismatch for $key" $data [reverse $key]
-}
-
-proc test021_recno.check { key data } {
- global dict
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "data mismatch: key $key" $data $kvals($key)
-}
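The SET_RANGE behavior test021 depends on: position the cursor at the smallest key greater than or equal to the argument, then walk forward with -next. A sketch, same assumptions, illustrative keys:

    set db [berkdb_open -create -mode 0644 -btree range.db]
    foreach k {apple banana cherry grape peach} { $db put $k 1 }
    set dbc [$db cursor]
    # No key equals "bo", so the cursor lands on the next key up.
    set r [$dbc get -set_range bo]
    puts [lindex [lindex $r 0] 0]                  ;# cherry
    puts [lindex [lindex [$dbc get -next] 0] 0]    ;# grape
    $dbc close
    $db close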
diff --git a/storage/bdb/test/test022.tcl b/storage/bdb/test/test022.tcl
deleted file mode 100644
index deded62e7a7..00000000000
--- a/storage/bdb/test/test022.tcl
+++ /dev/null
@@ -1,62 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test022.tcl,v 11.16 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test022
-# TEST Test of DB->getbyteswapped().
-proc test022 { method args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test022 ($args) $omethod: DB->getbyteswapped()"
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile1 "$testdir/test022a.db"
- set testfile2 "$testdir/test022b.db"
- set env NULL
- } else {
- set testfile1 "test022a.db"
- set testfile2 "test022b.db"
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- # Create two databases, one in each byte order.
- set db1 [eval {berkdb_open -create \
- -mode 0644} $omethod $args {-lorder 1234} $testfile1]
- error_check_good db1_open [is_valid_db $db1] TRUE
-
- set db2 [eval {berkdb_open -create \
- -mode 0644} $omethod $args {-lorder 4321} $testfile2]
- error_check_good db2_open [is_valid_db $db2] TRUE
-
- # Call DB->get_byteswapped on both of them.
- set db1_order [$db1 is_byteswapped]
- set db2_order [$db2 is_byteswapped]
-
- # Make sure that both answers are either 1 or 0,
- # and that exactly one of them is 1.
- error_check_good is_byteswapped_sensible_1 \
- [expr ($db1_order == 1 && $db2_order == 0) || \
- ($db1_order == 0 && $db2_order == 1)] 1
-
- error_check_good db1_close [$db1 close] 0
- error_check_good db2_close [$db2 close] 0
- puts "\tTest022 complete."
-}
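test022 relies on -lorder accepting the two canonical byte-order spellings. A sketch, same assumptions:

    # 1234 = little-endian, 4321 = big-endian. Whichever differs
    # from the host's native order reports is_byteswapped = 1.
    set le [berkdb_open -create -mode 0644 -lorder 1234 -btree le.db]
    set be [berkdb_open -create -mode 0644 -lorder 4321 -btree be.db]
    puts "[$le is_byteswapped] [$be is_byteswapped]"   ;# "0 1" or "1 0"
    $le close
    $be close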
diff --git a/storage/bdb/test/test023.tcl b/storage/bdb/test/test023.tcl
deleted file mode 100644
index c4a707288ff..00000000000
--- a/storage/bdb/test/test023.tcl
+++ /dev/null
@@ -1,219 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test023.tcl,v 11.21 2004/09/20 17:06:16 sue Exp $
-#
-# TEST test023
-# TEST Duplicate test
-# TEST Exercise deletes and cursor operations within a duplicate set.
-# TEST Add a key with duplicates (first time on-page, second time off-page)
-# TEST Number the dups.
-# TEST Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
-proc test023 { method args } {
- global alphabet
- global dupnum
- global dupstr
- global errorInfo
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- puts "Test023: $method delete duplicates/check cursor operations"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test023: skipping for method $omethod"
- return
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test023.db
- set env NULL
- } else {
- set testfile test023.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644 -dup} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- foreach i { onpage offpage } {
- if { $i == "onpage" } {
- set dupstr DUP
- } else {
- set dupstr [repeat $alphabet 50]
- }
- puts "\tTest023.a: Insert key w/$i dups"
- set key "duplicate_val_test"
- for { set count 0 } { $count < 20 } { incr count } {
- set ret \
- [eval {$db put} $txn $pflags {$key $count$dupstr}]
- error_check_good db_put $ret 0
- }
-
- # Now let's get all the items and make sure they look OK.
- puts "\tTest023.b: Check initial duplicates"
- set dupnum 0
- dump_file $db $txn $t1 test023.check
-
- # Delete a few specific items: the FIRST, the LAST, and one in
- # the middle. Make sure that CURRENT returns an error and that
- # NEXT and PREV do the right things.
-
- set ret [$dbc get -set $key]
- error_check_bad dbc_get:SET [llength $ret] 0
-
- puts "\tTest023.c: Delete first and try gets"
- # This should be the first duplicate
- error_check_good \
- dbc_get:SET $ret [list [list duplicate_val_test 0$dupstr]]
-
- # Now delete it.
- set ret [$dbc del]
- error_check_good dbc_del:FIRST $ret 0
-
- # Now current should fail
- set ret [$dbc get -current]
- error_check_good dbc_get:CURRENT $ret ""
-
- # Now Prev should fail
- set ret [$dbc get -prev]
- error_check_good dbc_get:prev0 [llength $ret] 0
-
- # Now 10 nexts should work to get us in the middle
- for { set j 1 } { $j <= 10 } { incr j } {
- set ret [$dbc get -next]
- error_check_good \
- dbc_get:next [llength [lindex $ret 0]] 2
- error_check_good \
- dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
- }
-
- puts "\tTest023.d: Delete middle and try gets"
- # Now do the delete on the current key.
- set ret [$dbc del]
- error_check_good dbc_del:10 $ret 0
-
- # Now current should fail
- set ret [$dbc get -current]
- error_check_good dbc_get:deleted $ret ""
-
- # Prev and Next should work
- set ret [$dbc get -next]
- error_check_good dbc_get:next [llength [lindex $ret 0]] 2
- error_check_good \
- dbc_get:next [lindex [lindex $ret 0] 1] 11$dupstr
-
- set ret [$dbc get -prev]
- error_check_good dbc_get:next [llength [lindex $ret 0]] 2
- error_check_good \
- dbc_get:next [lindex [lindex $ret 0] 1] 9$dupstr
-
- # Now go to the last one
- for { set j 11 } { $j <= 19 } { incr j } {
- set ret [$dbc get -next]
- error_check_good \
- dbc_get:next [llength [lindex $ret 0]] 2
- error_check_good \
- dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
- }
-
- puts "\tTest023.e: Delete last and try gets"
- # Now do the delete on the current key.
- set ret [$dbc del]
- error_check_good dbc_del:LAST $ret 0
-
- # Now current should fail
- set ret [$dbc get -current]
- error_check_good dbc_get:deleted $ret ""
-
- # Next should fail
- set ret [$dbc get -next]
- error_check_good dbc_get:next19 [llength $ret] 0
-
- # Prev should work
- set ret [$dbc get -prev]
- error_check_good dbc_get:next [llength [lindex $ret 0]] 2
- error_check_good \
- dbc_get:next [lindex [lindex $ret 0] 1] 18$dupstr
-
- # Now overwrite the current one, then count the number
- # of data items to make sure that we have the right number.
-
- puts "\tTest023.f: Count keys, overwrite current, count again"
- # At this point we should have 17 items: the initial 20
- # minus the 3 deletes.
- set dbc2 [eval {$db cursor} $txn]
- error_check_good db_cursor:2 [is_substr $dbc2 $db] 1
-
- set count_check 0
- for { set rec [$dbc2 get -first] } {
- [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
- incr count_check
- }
- error_check_good numdups $count_check 17
-
- set ret [$dbc put -current OVERWRITE]
- error_check_good dbc_put:current $ret 0
-
- set count_check 0
- for { set rec [$dbc2 get -first] } {
- [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
- incr count_check
- }
- error_check_good numdups $count_check 17
- error_check_good dbc2_close [$dbc2 close] 0
-
- # Done, delete all the keys for next iteration
- set ret [eval {$db del} $txn {$key}]
- error_check_good db_delete $ret 0
-
- # database should be empty
-
- set ret [$dbc get -first]
- error_check_good first_after_empty [llength $ret] 0
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
-}
-
-# Check function for test023; data is the dup number followed by dupstr
-proc test023.check { key data } {
- global dupnum
- global dupstr
- error_check_good "bad key" $key duplicate_val_test
- error_check_good "data mismatch for $key" $data $dupnum$dupstr
- incr dupnum
-}
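The post-delete cursor semantics test023 verifies can be seen in isolation: the deleted slot still anchors the cursor, -current comes back empty, and -next/-prev step over it. A sketch, same assumptions:

    set db [berkdb_open -create -mode 0644 -dup -btree dupdel.db]
    foreach d {0DUP 1DUP 2DUP} { $db put k $d }
    set dbc [$db cursor]
    $dbc get -set k                        ;# position on 0DUP
    $dbc del
    puts [llength [$dbc get -current]]     ;# 0: current item is gone
    puts [lindex [lindex [$dbc get -next] 0] 1]    ;# 1DUP
    $dbc close
    $db close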
diff --git a/storage/bdb/test/test024.tcl b/storage/bdb/test/test024.tcl
deleted file mode 100644
index 4ac1fceaeb2..00000000000
--- a/storage/bdb/test/test024.tcl
+++ /dev/null
@@ -1,268 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test024.tcl,v 11.21 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test024
-# TEST Record number retrieval test.
-# TEST Test the Btree and Record number get-by-number functionality.
-proc test024 { method {nentries 10000} args} {
- source ./include.tcl
- global rand_init
-
- set do_renumber [is_rrecno $method]
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test024: $method ($args)"
-
- if { [string compare $omethod "-hash"] == 0 } {
- puts "Test024 skipping for method HASH"
- return
- }
-
- berkdb srand $rand_init
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test024.db
- set env NULL
- } else {
- set testfile test024.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
-
- cleanup $testdir $env
-
- # Read the first nentries dictionary elements and reverse them.
- # Keep a list of these (these will be the keys).
- puts "\tTest024.a: initialization"
- set keys ""
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- lappend keys [reverse $str]
- incr count
- }
- close $did
-
- # Generate sorted order for the keys
- set sorted_keys [lsort $keys]
- # Create the database
- if { [string compare $omethod "-btree"] == 0 } {
- set db [eval {berkdb_open -create \
- -mode 0644 -recnum} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- } else {
- set db [eval {berkdb_open -create \
- -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- }
-
- set pflags ""
- set gflags ""
- set txn ""
-
- if { [is_record_based $method] == 1 } {
- set gflags " -recno"
- }
-
- puts "\tTest024.b: put/get loop"
- foreach k $keys {
- if { [is_record_based $method] == 1 } {
- set key [lsearch $sorted_keys $k]
- incr key
- } else {
- set key $k
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $k]}]
- error_check_good put $ret 0
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $k]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest024.c: dump file"
-
- # Put sorted keys in file
- set oid [open $t1 w]
- foreach k $sorted_keys {
- puts $oid [pad_data $method $k]
- }
- close $oid
-
- # Instead of using dump_file; get all the keys by keynum
- set oid [open $t2 w]
- if { [string compare $omethod "-btree"] == 0 } {
- set do_renumber 1
- }
-
- set gflags " -recno"
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- for { set k 1 } { $k <= $count } { incr k } {
- set ret [eval {$db get} $txn $gflags {$k}]
- puts $oid [lindex [lindex $ret 0] 1]
- error_check_good recnum_get [lindex [lindex $ret 0] 1] \
- [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
- }
- close $oid
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- error_check_good Test024.c:diff($t1,$t2) \
- [filecmp $t1 $t2] 0
-
- # Now, reopen the file and run the last test again.
- puts "\tTest024.d: close, open, and dump file"
- set db [eval {berkdb_open -rdonly} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set oid [open $t2 w]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- for { set k 1 } { $k <= $count } { incr k } {
- set ret [eval {$db get} $txn $gflags {$k}]
- puts $oid [lindex [lindex $ret 0] 1]
- error_check_good recnum_get [lindex [lindex $ret 0] 1] \
- [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- close $oid
- error_check_good db_close [$db close] 0
- error_check_good Test024.d:diff($t1,$t2) \
- [filecmp $t1 $t2] 0
-
- # Now, reopen the file and run the last test again in reverse direction.
- puts "\tTest024.e: close, open, and dump file in reverse direction"
- set db [eval {berkdb_open -rdonly} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- # Put sorted keys in file
- set rsorted ""
- foreach k $sorted_keys {
- set rsorted [linsert $rsorted 0 $k]
- }
- set oid [open $t1 w]
- foreach k $rsorted {
- puts $oid [pad_data $method $k]
- }
- close $oid
-
- set oid [open $t2 w]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- for { set k $count } { $k > 0 } { incr k -1 } {
- set ret [eval {$db get} $txn $gflags {$k}]
- puts $oid [lindex [lindex $ret 0] 1]
- error_check_good recnum_get [lindex [lindex $ret 0] 1] \
- [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- close $oid
- error_check_good db_close [$db close] 0
- error_check_good Test024.e:diff($t1,$t2) \
- [filecmp $t1 $t2] 0
-
- # Now try deleting elements and making sure they work
- puts "\tTest024.f: delete test"
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- while { $count > 0 } {
- set kndx [berkdb random_int 1 $count]
- set kval [lindex $keys [expr $kndx - 1]]
- set recno [expr [lsearch $sorted_keys $kval] + 1]
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- if { [is_record_based $method] == 1 } {
- set ret [eval {$db del} $txn {$recno}]
- } else {
- set ret [eval {$db del} $txn {$kval}]
- }
- error_check_good delete $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Remove the key from the key list
- set ndx [expr $kndx - 1]
- set keys [lreplace $keys $ndx $ndx]
-
- if { $do_renumber == 1 } {
- set r [expr $recno - 1]
- set sorted_keys [lreplace $sorted_keys $r $r]
- }
-
- # Check that the keys after it have been renumbered
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- if { $do_renumber == 1 && $recno != $count } {
- set r [expr $recno - 1]
- set ret [eval {$db get} $txn $gflags {$recno}]
- error_check_good get_after_del \
- [lindex [lindex $ret 0] 1] [lindex $sorted_keys $r]
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Decrement count
- incr count -1
- }
- error_check_good db_close [$db close] 0
-}
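The get-by-number functionality under test comes from opening the btree with -recnum, after which -recno on get addresses pairs by their position in sorted key order. A sketch, same assumptions:

    set db [berkdb_open -create -mode 0644 -recnum -btree recnum.db]
    foreach k {delta alpha charlie bravo} { $db put $k data_$k }
    # Sorted key order is alpha bravo charlie delta, so record 2
    # is bravo no matter the insertion order.
    puts [$db get -recno 2]    ;# {bravo data_bravo}
    $db close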
diff --git a/storage/bdb/test/test025.tcl b/storage/bdb/test/test025.tcl
deleted file mode 100644
index 8f3cb5c0cd3..00000000000
--- a/storage/bdb/test/test025.tcl
+++ /dev/null
@@ -1,146 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test025.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test025
-# TEST DB_APPEND flag test.
-proc test025 { method {nentries 10000} {start 0 } {tnum "025"} args} {
- global kvals
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- puts "Test$tnum: $method ($args)"
-
- if { [string compare $omethod "-btree"] == 0 } {
- puts "Test$tnum skipping for method BTREE"
- return
- }
- if { [string compare $omethod "-hash"] == 0 } {
- puts "Test$tnum skipping for method HASH"
- return
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
-
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- puts "\tTest$tnum.a: put/get loop"
- set gflags " -recno"
- set pflags " -append"
- set txn ""
- set checkfunc test025_check
-
- # Here is the loop where we put and get each key/data pair
- set count $start
- set nentries [expr $start + $nentries]
- if { $count != 0 } {
- gets $did str
- set k [expr $count + 1]
- set kvals($k) [pad_data $method $str]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$k [chop_data $method $str]}]
- error_check_good db_put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
-
- while { [gets $did str] != -1 && $count < $nentries } {
- set k [expr $count + 1]
- set kvals($k) [pad_data $method $str]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}]
- error_check_good db_put $ret $k
-
- set ret [eval {$db get} $txn $gflags {$k}]
- error_check_good \
- get $ret [list [list $k [pad_data $method $str]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # The recno key will be count + 1, so when we hit
- # UINT32_MAX - 1, reset to 0.
- if { $count == [expr 0xfffffffe] } {
- set count 0
- } else {
- incr count
- }
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum.c: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $t1 $checkfunc \
- dump_file_direction -first -next
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tTest$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $t1 $checkfunc \
- dump_file_direction -last -prev
-}
-
-proc test025_check { key data } {
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good " key/data mismatch for |$key|" $data $kvals($key)
-}
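The detail test025 asserts with error_check_good db_put $ret $k is that an -append put returns the record number it allocated. A sketch, same assumptions:

    set db [berkdb_open -create -mode 0644 -recno app.db]
    set r1 [$db put -append "first"]     ;# returns 1
    set r2 [$db put -append "second"]    ;# returns 2
    puts "$r1 $r2"                       ;# 1 2
    puts [$db get -recno $r2]            ;# {2 second}
    $db close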
diff --git a/storage/bdb/test/test026.tcl b/storage/bdb/test/test026.tcl
deleted file mode 100644
index ce91e2b464d..00000000000
--- a/storage/bdb/test/test026.tcl
+++ /dev/null
@@ -1,155 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test026.tcl,v 11.23 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test026
-# TEST Small keys/medium data w/duplicates
-# TEST Put/get per key.
-# TEST Loop through keys -- delete each key
-# TEST ... test that cursors delete duplicates correctly
-# TEST
-# TEST Keyed delete test through cursor. If ndups is small; this will
-# TEST test on-page dups; if it's large, it will test off-page dups.
-proc test026 { method {nentries 2000} {ndups 5} {tnum "026"} args} {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the defaults down a bit.
- # If we want a lot of dups, reduce that as well,
- # or repl testing takes very long.
- #
- if { $nentries == 2000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
- puts "Test$tnum: $method ($args) $nentries keys\
- with $ndups dups; cursor delete test"
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
-
- puts "\tTest$tnum.a: Put loop"
- set db [eval {berkdb_open -create \
- -mode 0644} $args {$omethod -dup $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
- while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } {
- set datastr [ make_data_str $str ]
- for { set j 1 } { $j <= $ndups} {incr j} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $j$datastr]}]
- error_check_good db_put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- }
- close $did
-
- error_check_good db_close [$db close] 0
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Now we will sequentially traverse the database getting each
- # item and deleting it.
- set count 0
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
- puts "\tTest$tnum.b: Get/delete loop"
- set i 1
- for { set ret [$dbc get -first] } {
- [string length $ret] != 0 } {
- set ret [$dbc get -next] } {
-
- set key [lindex [lindex $ret 0] 0]
- set data [lindex [lindex $ret 0] 1]
- if { $i == 1 } {
- set curkey $key
- }
- error_check_good seq_get:key $key $curkey
- error_check_good \
- seq_get:data $data [pad_data $method $i[make_data_str $key]]
-
- if { $i == $ndups } {
- set i 1
- } else {
- incr i
- }
-
- # Now delete the key
- set ret [$dbc del]
- error_check_good db_del:$key $ret 0
- }
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum.c: Verify empty file"
- # Double check that file is now empty
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
- set ret [$dbc get -first]
- error_check_good get_on_empty [string length $ret] 0
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
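test026's core pattern is delete-as-you-traverse: a cursor delete removes the current item without breaking forward iteration. A sketch, same assumptions:

    set db [berkdb_open -create -mode 0644 -dup -btree sweep.db]
    foreach k {a b} { foreach d {1 2 3} { $db put $k $d$k } }
    set dbc [$db cursor]
    # Visit every item, duplicates included, deleting as we go.
    for {set r [$dbc get -first]} {[llength $r] != 0} \
        {set r [$dbc get -next]} {
        $dbc del
    }
    puts [llength [$dbc get -first]]    ;# 0: the database is empty
    $dbc close
    $db close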
diff --git a/storage/bdb/test/test027.tcl b/storage/bdb/test/test027.tcl
deleted file mode 100644
index 7f6d78c3ad6..00000000000
--- a/storage/bdb/test/test027.tcl
+++ /dev/null
@@ -1,17 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test027.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test027
-# TEST Off-page duplicate test
-# TEST Test026 with parameters to force off-page duplicates.
-# TEST
-# TEST Check that delete operations work. Create a database; close
-# TEST database and reopen it. Then issue a delete by key for each
-# TEST entry.
-proc test027 { method {nentries 100} args} {
- eval {test026 $method $nentries 100 "027"} $args
-}
diff --git a/storage/bdb/test/test028.tcl b/storage/bdb/test/test028.tcl
deleted file mode 100644
index 3884d83e965..00000000000
--- a/storage/bdb/test/test028.tcl
+++ /dev/null
@@ -1,222 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test028.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test028
-# TEST Cursor delete test
-# TEST Test put operations after deleting through a cursor.
-proc test028 { method args } {
- global dupnum
- global dupstr
- global alphabet
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test028: $method put after cursor delete test"
-
- if { [is_rbtree $method] == 1 } {
- puts "Test028 skipping for method $method"
- return
- }
- if { [is_record_based $method] == 1 } {
- set key 10
- } else {
- append args " -dup"
- set key "put_after_cursor_del"
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test028.db
- set env NULL
- } else {
- set testfile test028.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set ndups 20
- set txn ""
- set pflags ""
- set gflags ""
-
- if { [is_record_based $method] == 1 } {
- set gflags " -recno"
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
- foreach i { offpage onpage } {
- foreach b { bigitem smallitem } {
- if { $i == "onpage" } {
- if { $b == "bigitem" } {
- set dupstr [repeat $alphabet 100]
- } else {
- set dupstr DUP
- }
- } else {
- if { $b == "bigitem" } {
- set dupstr [repeat $alphabet 100]
- } else {
- set dupstr [repeat $alphabet 50]
- }
- }
-
- if { $b == "bigitem" } {
- set dupstr [repeat $dupstr 10]
- }
- puts "\tTest028: $i/$b"
-
- puts "\tTest028.a: Insert key with single data item"
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $dupstr]}]
- error_check_good db_put $ret 0
-
- # Now let's get the item and make sure its OK.
- puts "\tTest028.b: Check initial entry"
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good db_get \
- $ret [list [list $key [pad_data $method $dupstr]]]
-
- # Now try a put with -nooverwrite set (should fail with DB_KEYEXIST)
- puts "\tTest028.c: No_overwrite test"
- set ret [eval {$db put} $txn \
- {-nooverwrite $key [chop_data $method $dupstr]}]
- error_check_good \
- db_put [is_substr $ret "DB_KEYEXIST"] 1
-
- # Now delete the item with a cursor
- puts "\tTest028.d: Delete test"
- set ret [$dbc get -set $key]
- error_check_bad dbc_get:SET [llength $ret] 0
-
- set ret [$dbc del]
- error_check_good dbc_del $ret 0
-
- puts "\tTest028.e: Reput the item"
- set ret [eval {$db put} $txn \
- {-nooverwrite $key [chop_data $method $dupstr]}]
- error_check_good db_put $ret 0
-
- puts "\tTest028.f: Retrieve the item"
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good db_get $ret \
- [list [list $key [pad_data $method $dupstr]]]
-
- # Delete the key to set up for next test
- set ret [eval {$db del} $txn {$key}]
- error_check_good db_del $ret 0
-
- # Now repeat the above set of tests with
- # duplicates (if not RECNO).
- if { [is_record_based $method] == 1 } {
- continue;
- }
-
- puts "\tTest028.g: Insert key with duplicates"
- for { set count 0 } { $count < $ndups } { incr count } {
- set ret [eval {$db put} $txn \
- {$key [chop_data $method $count$dupstr]}]
- error_check_good db_put $ret 0
- }
-
- puts "\tTest028.h: Check dups"
- set dupnum 0
- dump_file $db $txn $t1 test028.check
-
- # Try no_overwrite
- puts "\tTest028.i: No_overwrite test"
- set ret [eval {$db put} \
- $txn {-nooverwrite $key $dupstr}]
- error_check_good \
- db_put [is_substr $ret "DB_KEYEXIST"] 1
-
- # Now delete all the elements with a cursor
- puts "\tTest028.j: Cursor Deletes"
- set count 0
- for { set ret [$dbc get -set $key] } {
- [string length $ret] != 0 } {
- set ret [$dbc get -next] } {
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_good db_seq(key) $k $key
- error_check_good db_seq(data) $d $count$dupstr
- set ret [$dbc del]
- error_check_good dbc_del $ret 0
- incr count
- if { $count == [expr $ndups - 1] } {
- puts "\tTest028.k:\
- Duplicate No_Overwrite test"
- set ret [eval {$db put} $txn \
- {-nooverwrite $key $dupstr}]
- error_check_good db_put [is_substr \
- $ret "DB_KEYEXIST"] 1
- }
- }
-
- # Make sure all the items are gone
- puts "\tTest028.l: Get after delete"
- set ret [$dbc get -set $key]
- error_check_good get_after_del [string length $ret] 0
-
- puts "\tTest028.m: Reput the item"
- set ret [eval {$db put} \
- $txn {-nooverwrite $key 0$dupstr}]
- error_check_good db_put $ret 0
- for { set count 1 } { $count < $ndups } { incr count } {
- set ret [eval {$db put} $txn \
- {$key $count$dupstr}]
- error_check_good db_put $ret 0
- }
-
- puts "\tTest028.n: Retrieve the item"
- set dupnum 0
- dump_file $db $txn $t1 test028.check
-
- # Clean out in prep for next test
- set ret [eval {$db del} $txn {$key}]
- error_check_good db_del $ret 0
- }
- }
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
-}
-
-# Check function for test028; key is fixed, data is $dupnum$dupstr
-proc test028.check { key data } {
- global dupnum
- global dupstr
- error_check_good "Bad key" $key put_after_cursor_del
- error_check_good "data mismatch for $key" $data $dupnum$dupstr
- incr dupnum
-}
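
The core pattern test028 exercises — delete a key/data pair through a cursor, then verify that a put (even with -nooverwrite) succeeds again — reduces to a few lines. A minimal sketch against the berkdb Tcl API used above; the database name is illustrative and error checks are omitted:

    # Sketch of test028's cursor-delete-then-reput pattern.
    set db [berkdb_open -create -mode 0644 -btree put_del.db]
    set ret [$db put mykey mydata]
    set dbc [$db cursor]
    set ret [$dbc get -set mykey]    ;# position the cursor on the pair
    set ret [$dbc del]               ;# delete through the cursor
    # With the pair gone, a put with -nooverwrite succeeds again.
    set ret [$db put -nooverwrite mykey mydata]
    $dbc close
    $db close
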
diff --git a/storage/bdb/test/test029.tcl b/storage/bdb/test/test029.tcl
deleted file mode 100644
index 53622efeb73..00000000000
--- a/storage/bdb/test/test029.tcl
+++ /dev/null
@@ -1,247 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test029.tcl,v 11.24 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test029
-# TEST Test Btree and Recno record number renumbering.
-proc test029 { method {nentries 10000} args} {
- source ./include.tcl
-
- set do_renumber [is_rrecno $method]
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test029: $method ($args)"
-
- if { [string compare $omethod "-hash"] == 0 } {
- puts "Test029 skipping for method HASH"
- return
- }
- if { [is_record_based $method] == 1 && $do_renumber != 1 } {
- puts "Test029 skipping for method RECNO (w/out renumbering)"
- return
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test029.db
- set env NULL
- } else {
- set testfile test029.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- # Do not set nentries down to 100 until we
- # fix SR #5958.
- set nentries 1000
- }
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- # Read the first nentries dictionary elements and reverse them.
- # Keep a list of these (these will be the keys).
- puts "\tTest029.a: initialization"
- set keys ""
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- lappend keys [reverse $str]
- incr count
- }
- close $did
-
- # Generate sorted order for the keys
- set sorted_keys [lsort $keys]
-
- # Save the first and last keys
- set last_key [lindex $sorted_keys end]
- set last_keynum [llength $sorted_keys]
-
- set first_key [lindex $sorted_keys 0]
- set first_keynum 1
-
- # Create the database
- if { [string compare $omethod "-btree"] == 0 } {
- set db [eval {berkdb_open -create \
- -mode 0644 -recnum} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- } else {
- set db [eval {berkdb_open -create \
- -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- }
-
- set pflags ""
- set gflags ""
- set txn ""
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
-
- puts "\tTest029.b: put/get loop"
- foreach k $keys {
- if { [is_record_based $method] == 1 } {
- set key [lsearch $sorted_keys $k]
- incr key
- } else {
- set key $k
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $k]}]
- error_check_good dbput $ret 0
-
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good dbget [lindex [lindex $ret 0] 1] $k
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Now delete the first key in the database
- puts "\tTest029.c: delete and verify renumber"
-
- # Delete the first key in the file
- if { [is_record_based $method] == 1 } {
- set key $first_keynum
- } else {
- set key $first_key
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db del} $txn {$key}]
- error_check_good db_del $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Now we are ready to retrieve records based on
- # record number
- if { [string compare $omethod "-btree"] == 0 } {
- append gflags " -recno"
- }
-
- # First try to get the old last key (shouldn't exist)
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db get} $txn $gflags {$last_keynum}]
- error_check_good get_after_del $ret [list]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Now try to get what we think should be the last key
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
- error_check_good \
- getn_last_after_del [lindex [lindex $ret 0] 1] $last_key
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Create a cursor; we need it for the next test and we
- # need it for recno here.
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- # OK, now re-put the first key and make sure that we
- # renumber the last key appropriately.
- if { [string compare $omethod "-btree"] == 0 } {
- set ret [eval {$db put} $txn \
- {$key [chop_data $method $first_key]}]
- error_check_good db_put $ret 0
- } else {
- # Recno
- set ret [$dbc get -first]
- set ret [eval {$dbc put} $pflags {-before $first_key}]
- error_check_bad dbc_put:DB_BEFORE $ret 0
- }
-
- # Now check that the last record matches the last record number
- set ret [eval {$db get} $txn $gflags {$last_keynum}]
- error_check_good \
- getn_last_after_put [lindex [lindex $ret 0] 1] $last_key
-
- # Now delete the first key in the database using a cursor
- puts "\tTest029.d: delete with cursor and verify renumber"
-
- set ret [$dbc get -first]
- error_check_good dbc_first $ret [list [list $key $first_key]]
-
- # Now delete at the cursor
- set ret [$dbc del]
- error_check_good dbc_del $ret 0
-
- # Now check the record numbers of the last keys again.
- # First try to get the old last key (shouldn't exist)
- set ret [eval {$db get} $txn $gflags {$last_keynum}]
- error_check_good get_last_after_cursor_del:$ret $ret [list]
-
- # Now try to get what we think should be the last key
- set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
- error_check_good \
- getn_after_cursor_del [lindex [lindex $ret 0] 1] $last_key
-
- # Re-put the first key and make sure that we renumber the last
- # key appropriately. We can't do a c_put -current, so do
- # a db put instead.
- if { [string compare $omethod "-btree"] == 0 } {
- puts "\tTest029.e: put (non-cursor) and verify renumber"
- set ret [eval {$db put} $txn \
- {$key [chop_data $method $first_key]}]
- error_check_good db_put $ret 0
- } else {
- puts "\tTest029.e: put with cursor and verify renumber"
- set ret [eval {$dbc put} $pflags {-before $first_key}]
- error_check_bad dbc_put:DB_BEFORE $ret 0
- }
-
- # Now check that the last record matches the last record number
- set ret [eval {$db get} $txn $gflags {$last_keynum}]
- error_check_good \
- get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
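
test029's renumbering check boils down to this: in a btree opened with -recnum, deleting the first record shifts every later record number down by one, and record-number gets see that immediately. A minimal sketch, assuming the berkdb Tcl API used above (names illustrative):

    # Sketch of the record renumbering test029 verifies.
    set db [berkdb_open -create -mode 0644 -recnum -btree renum.db]
    foreach k {apple berry cherry} { $db put $k $k }
    set ret [$db del apple]       ;# record 1 disappears
    set ret [$db get -recno 2]    ;# "cherry", formerly record 3, is now record 2
    $db close
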
diff --git a/storage/bdb/test/test030.tcl b/storage/bdb/test/test030.tcl
deleted file mode 100644
index 3ee9daa3f50..00000000000
--- a/storage/bdb/test/test030.tcl
+++ /dev/null
@@ -1,231 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test030.tcl,v 11.20 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test030
-# TEST Test DB_NEXT_DUP Functionality.
-proc test030 { method {nentries 10000} args } {
- global rand_init
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 ||
- [is_rbtree $method] == 1 } {
- puts "Test030 skipping for method $method"
- return
- }
- berkdb srand $rand_init
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test030.db
- set cntfile $testdir/cntfile.db
- set env NULL
- } else {
- set testfile test030.db
- set cntfile cntfile.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
-
- puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- set db [eval {berkdb_open -create \
- -mode 0644 -dup} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Use a second DB to keep track of how many duplicates
- # we enter per key
-
- set cntdb [eval {berkdb_open -create \
- -mode 0644} $args {-btree $cntfile}]
- error_check_good dbopen:cntfile [is_valid_db $cntdb] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
- # We will add between 1 and 10 dups with values 1 ... dups
- # We'll verify each addition.
-
- set did [open $dict]
- puts "\tTest030.a: put and get duplicate keys."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
-
- while { [gets $did str] != -1 && $count < $nentries } {
- set ndup [berkdb random_int 1 10]
-
- for { set i 1 } { $i <= $ndup } { incr i 1 } {
- set ctxn ""
- if { $txnenv == 1 } {
- set ct [$env txn]
- error_check_good txn \
- [is_valid_txn $ct $env] TRUE
- set ctxn "-txn $ct"
- }
- set ret [eval {$cntdb put} \
- $ctxn $pflags {$str [chop_data $method $ndup]}]
- error_check_good put_cnt $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$ct commit] 0
- }
- set datastr $i:$str
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good put $ret 0
- }
-
- # Now retrieve all the keys matching this key
- set x 0
- for {set ret [$dbc get -set $str]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -nextdup] } {
- incr x
-
- if { [llength $ret] == 0 } {
- break
- }
-
- set k [lindex [lindex $ret 0] 0]
- if { [string compare $k $str] != 0 } {
- break
- }
-
- set datastr [lindex [lindex $ret 0] 1]
- set d [data_of $datastr]
- error_check_good Test030:put $d $str
-
- set id [ id_of $datastr ]
- error_check_good Test030:dup# $id $x
- }
- error_check_good Test030:numdups $x $ndup
- incr count
- }
- close $did
-
- # Verify on sequential pass of entire file
- puts "\tTest030.b: sequential check"
-
- # We can't just set lastkey to a null string, since that might
- # be a key now!
- set lastkey "THIS STRING WILL NEVER BE A KEY"
-
- for {set ret [$dbc get -first]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -next] } {
-
- # Outer loop should always get a new key
-
- set k [lindex [lindex $ret 0] 0]
- error_check_bad outer_get_loop:key $k $lastkey
-
- set datastr [lindex [lindex $ret 0] 1]
- set d [data_of $datastr]
- set id [ id_of $datastr ]
-
- error_check_good outer_get_loop:data $d $k
- error_check_good outer_get_loop:id $id 1
-
- set lastkey $k
- # Figure out how many dups we should have
- if { $txnenv == 1 } {
- set ct [$env txn]
- error_check_good txn [is_valid_txn $ct $env] TRUE
- set ctxn "-txn $ct"
- }
- set ret [eval {$cntdb get} $ctxn $pflags {$k}]
- set ndup [lindex [lindex $ret 0] 1]
- if { $txnenv == 1 } {
- error_check_good txn [$ct commit] 0
- }
-
- set howmany 1
- for { set ret [$dbc get -nextdup] } \
- { [llength $ret] != 0 } \
- { set ret [$dbc get -nextdup] } {
- incr howmany
-
- set k [lindex [lindex $ret 0] 0]
- error_check_good inner_get_loop:key $k $lastkey
-
- set datastr [lindex [lindex $ret 0] 1]
- set d [data_of $datastr]
- set id [ id_of $datastr ]
-
- error_check_good inner_get_loop:data $d $k
- error_check_good inner_get_loop:id $id $howmany
-
- }
- error_check_good ndups_found $howmany $ndup
- }
-
- # Verify on key lookup
- puts "\tTest030.c: keyed check"
- set cnt_dbc [$cntdb cursor]
- for {set ret [$cnt_dbc get -first]} \
- {[llength $ret] != 0} \
- {set ret [$cnt_dbc get -next] } {
- set k [lindex [lindex $ret 0] 0]
-
- set howmany [lindex [lindex $ret 0] 1]
- error_check_bad cnt_seq:data [string length $howmany] 0
-
- set i 0
- for {set ret [$dbc get -set $k]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -nextdup] } {
- incr i
-
- set k [lindex [lindex $ret 0] 0]
-
- set datastr [lindex [lindex $ret 0] 1]
- set d [data_of $datastr]
- set id [ id_of $datastr ]
-
- error_check_good inner_get_loop:data $d $k
- error_check_good inner_get_loop:id $id $i
- }
- error_check_good keyed_count $i $howmany
-
- }
- error_check_good cnt_curs_close [$cnt_dbc close] 0
- error_check_good db_curs_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good cnt_file_close [$cntdb close] 0
- error_check_good db_file_close [$db close] 0
-}
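
The DB_NEXT_DUP idiom test030 hammers on is worth seeing in isolation: position with -set, then loop with -nextdup, which returns an empty list once the duplicate set for that key is exhausted. A minimal sketch in the same berkdb Tcl API (names illustrative):

    # Sketch of the -set / -nextdup duplicate walk from test030.
    set db [berkdb_open -create -mode 0644 -dup -btree nextdup.db]
    foreach d {1:a 2:a 3:a} { $db put a $d }
    set dbc [$db cursor]
    for {set ret [$dbc get -set a]} {[llength $ret] != 0} \
        {set ret [$dbc get -nextdup]} {
            puts [lindex [lindex $ret 0] 1]    ;# prints 1:a, 2:a, 3:a in turn
    }
    $dbc close
    $db close
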
diff --git a/storage/bdb/test/test031.tcl b/storage/bdb/test/test031.tcl
deleted file mode 100644
index 2b4ad0d9878..00000000000
--- a/storage/bdb/test/test031.tcl
+++ /dev/null
@@ -1,230 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test031.tcl,v 11.27 2004/01/28 03:36:30 bostic Exp $
-#
-# TEST test031
-# TEST Duplicate sorting functionality
-# TEST Make sure DB_NODUPDATA works.
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and "ndups" duplicates.
-# TEST For the data field, prepend random five-char strings (see test032)
-# TEST so that we force the duplicate sorting code to do something.
-# TEST Along the way, test that we cannot insert duplicate duplicates
-# TEST using DB_NODUPDATA.
-# TEST
-# TEST By setting ndups large, we can make this an off-page test
-# TEST After all are entered, retrieve all; verify output.
-# TEST Close file, reopen, do retrieve and re-verify.
-# TEST This does not work for recno
-proc test031 { method {nentries 10000} {ndups 5} {tnum "031"} args } {
- global alphabet
- global rand_init
- source ./include.tcl
-
- berkdb srand $rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set checkdb $testdir/checkdb.db
- set env NULL
- } else {
- set testfile test$tnum.db
- set checkdb checkdb.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- puts "Test$tnum: \
- $method ($args) $nentries small $ndups sorted dup key/data pairs"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $omethod"
- return
- }
- set db [eval {berkdb_open -create \
- -mode 0644} $args {$omethod -dup -dupsort $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set check_db [eval {berkdb_open \
- -create -mode 0644} $args {-hash $checkdb}]
- error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
- puts "\tTest$tnum.a: Put/get loop, check nodupdata"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
- while { [gets $did str] != -1 && $count < $nentries } {
- # Re-initialize random string generator
- randstring_init $ndups
-
- set dups ""
- for { set i 1 } { $i <= $ndups } { incr i } {
- set pref [randstring]
- set dups $dups$pref
- set datastr $pref:$str
- if { $i == 2 } {
- set nodupstr $datastr
- }
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good put $ret 0
- }
-
- # Test DB_NODUPDATA using the DB handle
- set ret [eval {$db put -nodupdata} \
- $txn $pflags {$str [chop_data $method $nodupstr]}]
- error_check_good db_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
-
- set ret [eval {$check_db put} \
- $txn $pflags {$str [chop_data $method $dups]}]
- error_check_good checkdb_put $ret 0
-
- # Now retrieve all the keys matching this key
- set x 0
- set lastdup ""
- # Test DB_NODUPDATA using cursor handle
- set ret [$dbc get -set $str]
- error_check_bad dbc_get [llength $ret] 0
- set datastr [lindex [lindex $ret 0] 1]
- error_check_bad dbc_data [string length $datastr] 0
- set ret [eval {$dbc put -nodupdata} \
- {$str [chop_data $method $datastr]}]
- error_check_good dbc_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
-
- for {set ret [$dbc get -set $str]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -nextdup] } {
- set k [lindex [lindex $ret 0] 0]
- if { [string compare $k $str] != 0 } {
- break
- }
- set datastr [lindex [lindex $ret 0] 1]
- if {[string length $datastr] == 0} {
- break
- }
- if {[string compare \
- $lastdup [pad_data $method $datastr]] > 0} {
- error_check_good \
- sorted_dups($lastdup,$datastr) 0 1
- }
- incr x
- set lastdup $datastr
- }
- error_check_good "Test$tnum:ndups:$str" $x $ndups
- incr count
- }
- error_check_good cursor_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.b: Checking file for correct duplicates"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open(2) [is_valid_cursor $dbc $db] TRUE
-
- set lastkey "THIS WILL NEVER BE A KEY VALUE"
- # no need to delete $lastkey
- set firsttimethru 1
- for {set ret [$dbc get -first]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -next] } {
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_bad data_check:$d [string length $d] 0
-
- if { [string compare $k $lastkey] != 0 } {
- # Remove last key from the checkdb
- if { $firsttimethru != 1 } {
- error_check_good check_db:del:$lastkey \
- [eval {$check_db del} $txn {$lastkey}] 0
- }
- set firsttimethru 0
- set lastdup ""
- set lastkey $k
- set dups [lindex [lindex [eval {$check_db get} \
- $txn {$k}] 0] 1]
- error_check_good check_db:get:$k \
- [string length $dups] [expr $ndups * 4]
- }
-
- if { [string compare $lastdup $d] > 0 } {
- error_check_good dup_check:$k:$d 0 1
- }
- set lastdup $d
-
- set pref [string range $d 0 3]
- set ndx [string first $pref $dups]
- error_check_good valid_duplicate [expr $ndx >= 0] 1
- set a [string range $dups 0 [expr $ndx - 1]]
- set b [string range $dups [expr $ndx + 4] end]
- set dups $a$b
- }
- # Remove last key from the checkdb
- if { [string length $lastkey] != 0 } {
- error_check_good check_db:del:$lastkey \
- [eval {$check_db del} $txn {$lastkey}] 0
- }
-
- # Make sure there is nothing left in check_db
-
- set check_c [eval {$check_db cursor} $txn]
- set ret [$check_c get -first]
- error_check_good check_c:get:$ret [llength $ret] 0
- error_check_good check_c:close [$check_c close] 0
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good check_db:close [$check_db close] 0
- error_check_good db_close [$db close] 0
-}
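
The two behaviors test031 checks — duplicates kept sorted under -dupsort, and DB_NODUPDATA rejecting an exact key/data pair that already exists — show up directly in the API. A minimal sketch (names illustrative; as in the test above, the expected failure comes back as a DB_KEYEXIST string rather than a Tcl error):

    # Sketch of sorted duplicates and -nodupdata from test031.
    set db [berkdb_open -create -mode 0644 -dup -dupsort -btree sorted.db]
    set ret [$db put fruit banana]
    set ret [$db put fruit apple]              ;# stored before banana: dups sort
    set ret [$db put -nodupdata fruit apple]   ;# exact pair exists: DB_KEYEXIST
    $db close
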
diff --git a/storage/bdb/test/test032.tcl b/storage/bdb/test/test032.tcl
deleted file mode 100644
index e7cc49b4776..00000000000
--- a/storage/bdb/test/test032.tcl
+++ /dev/null
@@ -1,231 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test032.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test032
-# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE
-# TEST
-# TEST Use the first 10,000 entries from the dictionary. Insert each with
-# TEST self as key and "ndups" duplicates. For the data field, prepend the
-# TEST letters of the alphabet in a random order so we force the duplicate
-# TEST sorting code to do something. By setting ndups large, we can make
-# TEST this an off-page test.
-# TEST
-# TEST Test the DB_GET_BOTH functionality by retrieving each dup in the file
-# TEST explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
-# TEST the unique key prefix (cursor only). Finally test the failure case.
-proc test032 { method {nentries 10000} {ndups 5} {tnum "032"} args } {
- global alphabet rand_init
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- berkdb srand $rand_init
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set checkdb $testdir/checkdb.db
- set env NULL
- } else {
- set testfile test$tnum.db
- set checkdb checkdb.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- puts "Test$tnum:\
- $method ($args) $nentries small sorted $ndups dup key/data pairs"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $omethod"
- return
- }
- set db [eval {berkdb_open -create -mode 0644 \
- $omethod -dup -dupsort} $args {$testfile} ]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set check_db [eval {berkdb_open \
- -create -mode 0644} $args {-hash $checkdb}]
- error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
- puts "\tTest$tnum.a: Put/get loop"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
- while { [gets $did str] != -1 && $count < $nentries } {
- # Re-initialize random string generator
- randstring_init $ndups
-
- set dups ""
- for { set i 1 } { $i <= $ndups } { incr i } {
- set pref [randstring]
- set dups $dups$pref
- set datastr $pref:$str
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good put $ret 0
- }
- set ret [eval {$check_db put} \
- $txn $pflags {$str [chop_data $method $dups]}]
- error_check_good checkdb_put $ret 0
-
- # Now retrieve all the keys matching this key
- set x 0
- set lastdup ""
- for {set ret [$dbc get -set $str]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -nextdup] } {
- set k [lindex [lindex $ret 0] 0]
- if { [string compare $k $str] != 0 } {
- break
- }
- set datastr [lindex [lindex $ret 0] 1]
- if {[string length $datastr] == 0} {
- break
- }
- if {[string compare $lastdup $datastr] > 0} {
- error_check_good \
- sorted_dups($lastdup,$datastr) 0 1
- }
- incr x
- set lastdup $datastr
- }
-
- error_check_good "Test$tnum:ndups:$str" $x $ndups
- incr count
- }
- error_check_good cursor_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.b: Checking file for correct duplicates (no cursor)"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set check_c [eval {$check_db cursor} $txn]
- error_check_good check_c_open(2) \
- [is_valid_cursor $check_c $check_db] TRUE
-
- for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
- for {set ret [$check_c get -first]} \
- {[llength $ret] != 0} \
- {set ret [$check_c get -next] } {
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_bad data_check:$d [string length $d] 0
-
- set pref [string range $d $ndx [expr $ndx + 3]]
- set data $pref:$k
- set ret [eval {$db get} $txn {-get_both $k $data}]
- error_check_good \
- get_both_data:$k $ret [list [list $k $data]]
- }
- }
-
- $db sync
-
- # Now repeat the above test using cursor ops
- puts "\tTest$tnum.c: Checking file for correct duplicates (cursor)"
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
-
- for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
- for {set ret [$check_c get -first]} \
- {[llength $ret] != 0} \
- {set ret [$check_c get -next] } {
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_bad data_check:$d [string length $d] 0
-
- set pref [string range $d $ndx [expr $ndx + 3]]
- set data $pref:$k
- set ret [eval {$dbc get} {-get_both $k $data}]
- error_check_good \
- curs_get_both_data:$k $ret [list [list $k $data]]
-
- set ret [eval {$dbc get} {-get_both_range $k $pref}]
- error_check_good \
- curs_get_both_range:$k $ret [list [list $k $data]]
- }
- }
-
- # Now check the error case
- puts "\tTest$tnum.d: Check error case (no cursor)"
- for {set ret [$check_c get -first]} \
- {[llength $ret] != 0} \
- {set ret [$check_c get -next] } {
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_bad data_check:$d [string length $d] 0
-
- set data XXX$k
- set ret [eval {$db get} $txn {-get_both $k $data}]
- error_check_good error_case:$k [llength $ret] 0
- }
-
- # Now check the error case
- puts "\tTest$tnum.e: Check error case (cursor)"
- for {set ret [$check_c get -first]} \
- {[llength $ret] != 0} \
- {set ret [$check_c get -next] } {
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_bad data_check:$d [string length $d] 0
-
- set data XXX$k
- set ret [eval {$dbc get} {-get_both $k $data}]
- error_check_good error_case:$k [llength $ret] 0
- }
-
- error_check_good check_c:close [$check_c close] 0
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good check_db:close [$check_db close] 0
- error_check_good db_close [$db close] 0
-}
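
test032's two retrieval modes differ in one way: -get_both needs the exact key/data pair, while the cursor-only -get_both_range matches the smallest duplicate at or above the supplied data prefix. A minimal sketch (names illustrative):

    # Sketch of -get_both vs. -get_both_range from test032.
    set db [berkdb_open -create -mode 0644 -dup -dupsort -btree both.db]
    foreach d {aa:k bb:k cc:k} { $db put k $d }
    set ret [$db get -get_both k bb:k]         ;# exact pair: returns {{k bb:k}}
    set dbc [$db cursor]
    set ret [$dbc get -get_both_range k bb]    ;# prefix: lands on bb:k
    $dbc close
    $db close
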
diff --git a/storage/bdb/test/test033.tcl b/storage/bdb/test/test033.tcl
deleted file mode 100644
index b606883c1a0..00000000000
--- a/storage/bdb/test/test033.tcl
+++ /dev/null
@@ -1,176 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test033.tcl,v 11.27 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test033
-# TEST DB_GET_BOTH without comparison function
-# TEST
-# TEST Use the first 10,000 entries from the dictionary. Insert each with
-# TEST self as key and data; add duplicate records for each. After all are
-# TEST entered, retrieve all and verify output using DB_GET_BOTH (on DB and
-# TEST DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
-# TEST nonexistent keys.
-# TEST
-# TEST XXX
-# TEST This does not work for rbtree.
-proc test033 { method {nentries 10000} {ndups 5} {tnum "033"} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- if { [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
-
- puts "Test$tnum: $method ($args) $nentries small $ndups dup key/data pairs"
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- # Duplicate data entries are not allowed in record based methods.
- if { [is_record_based $method] == 1 } {
- set db [eval {berkdb_open -create -mode 0644 \
- $omethod} $args {$testfile}]
- } else {
- set db [eval {berkdb_open -create -mode 0644 \
- $omethod -dup} $args {$testfile}]
- }
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
-
- # Allocate a cursor for DB_GET_BOTH_RANGE.
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
-
- puts "\tTest$tnum.a: Put/get loop."
- # Here is the loop where we put and get each key/data pair
- set count 0
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- set ret [eval {$db put} $txn $pflags \
- {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- } else {
- for { set i 1 } { $i <= $ndups } { incr i } {
- set datastr $i:$str
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good db_put $ret 0
- }
- }
-
- # Now retrieve all the keys matching this key and dup
- # for non-record based AMs.
- if { [is_record_based $method] == 1 } {
- test033_recno.check $db $dbc $method $str $txn $key
- } else {
- test033_check $db $dbc $method $str $txn $ndups
- }
- incr count
- }
-
- close $did
-
- puts "\tTest$tnum.b: Verifying DB_GET_BOTH after creation."
- set count 0
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- # Now retrieve all the keys matching this key
- # for non-record based AMs.
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- test033_recno.check $db $dbc $method $str $txn $key
- } else {
- test033_check $db $dbc $method $str $txn $ndups
- }
- incr count
- }
- close $did
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
-
-# No testing of dups is done on record-based methods.
-proc test033_recno.check {db dbc method str txn key} {
- set ret [eval {$db get} $txn {-recno $key}]
- error_check_good "db_get:$method" \
- [lindex [lindex $ret 0] 1] [pad_data $method $str]
- set ret [$dbc get -get_both $key [pad_data $method $str]]
- error_check_good "db_get_both:$method" \
- [lindex [lindex $ret 0] 1] [pad_data $method $str]
-}
-
-# Testing of non-record-based methods includes duplicates
-# and get_both_range.
-proc test033_check {db dbc method str txn ndups} {
- for {set i 1} {$i <= $ndups } { incr i } {
- set datastr $i:$str
-
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good "db_get_both:dup#" \
- [lindex [lindex $ret 0] 1] $datastr
-
- set ret [$dbc get -get_both $str $datastr]
- error_check_good "dbc_get_both:dup#" \
- [lindex [lindex $ret 0] 1] $datastr
-
- set ret [$dbc get -get_both_range $str $datastr]
- error_check_good "dbc_get_both_range:dup#" \
- [lindex [lindex $ret 0] 1] $datastr
- }
-
- # Now retrieve non-existent dup (i is ndups + 1)
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good db_get_both:dupfailure [llength $ret] 0
- set ret [$dbc get -get_both $str $datastr]
- error_check_good dbc_get_both:dupfailure [llength $ret] 0
- set ret [$dbc get -get_both_range $str $datastr]
- error_check_good dbc_get_both_range [llength $ret] 0
-}
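
test033 covers the same lookups on a database opened with -dup but without -dupsort, where DB_GET_BOTH has to scan the unsorted duplicate set linearly. A minimal sketch of the existent and nonexistent cases (names illustrative):

    # Sketch of -get_both without a comparison function, from test033.
    set db [berkdb_open -create -mode 0644 -dup -btree plaindup.db]
    set ret [$db put k 1:k]
    set ret [$db put k 2:k]
    set ret [$db get -get_both k 2:k]    ;# found: {{k 2:k}}
    set ret [$db get -get_both k 9:k]    ;# no such dup: empty list
    $db close
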
diff --git a/storage/bdb/test/test034.tcl b/storage/bdb/test/test034.tcl
deleted file mode 100644
index 5da92052214..00000000000
--- a/storage/bdb/test/test034.tcl
+++ /dev/null
@@ -1,23 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1998-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test034.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test034
-# TEST test032 with off-page duplicates
-# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
-proc test034 { method {nentries 10000} args} {
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test034: Skipping for specific pagesizes"
- return
- }
- # Test with off-page duplicates
- eval {test032 $method $nentries 20 "034" -pagesize 512} $args
-
- # Test with multiple pages of off-page duplicates
- eval {test032 $method [expr $nentries / 10] 100 "034" -pagesize 512} \
- $args
-}
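
test034 (and test035 below) force the off-page duplicate code paths purely through parameters: a 512-byte page with 20 or 100 duplicates per key guarantees the duplicate set cannot stay on the leaf page. The same trick in isolation — a minimal sketch, names illustrative:

    # Sketch: forcing off-page duplicates with a small page size.
    set db [berkdb_open -create -mode 0644 -pagesize 512 -dup -btree offpage.db]
    for {set i 0} {$i < 100} {incr i} {
        set ret [$db put bigkey [format %03d:payload $i]]
    }
    $db close
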
diff --git a/storage/bdb/test/test035.tcl b/storage/bdb/test/test035.tcl
deleted file mode 100644
index 63945c58875..00000000000
--- a/storage/bdb/test/test035.tcl
+++ /dev/null
@@ -1,22 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test035.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test035
-# TEST Test033 with off-page duplicates
-# TEST DB_GET_BOTH functionality with off-page duplicates.
-proc test035 { method {nentries 10000} args} {
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test035: Skipping for specific pagesizes"
- return
- }
- # Test with off-page duplicates
- eval {test033 $method $nentries 20 "035" -pagesize 512} $args
- # Test with multiple pages of off-page duplicates
- eval {test033 $method [expr $nentries / 10] 100 "035" -pagesize 512} \
- $args
-}
diff --git a/storage/bdb/test/test036.tcl b/storage/bdb/test/test036.tcl
deleted file mode 100644
index 5fe24cb21ae..00000000000
--- a/storage/bdb/test/test036.tcl
+++ /dev/null
@@ -1,173 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test036.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test036
-# TEST Test KEYFIRST and KEYLAST when the key doesn't exist
-# TEST Put nentries key/data pairs (from the dictionary) using a cursor
-# TEST and KEYFIRST and KEYLAST (this tests the case where we use cursor
-# TEST put for non-existent keys).
-proc test036 { method {nentries 10000} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- if { [is_record_based $method] == 1 } {
- puts "Test036 skipping for method recno"
- return
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test036.db
- set env NULL
- } else {
- set testfile test036.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
-
- puts "Test036: $method ($args) $nentries equal key/data pairs"
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- set checkfunc test036_recno.check
- append gflags " -recno"
- } else {
- set checkfunc test036.check
- }
- puts "\tTest036.a: put/get loop KEYFIRST"
- # Here is the loop where we put and get each key/data pair
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor [is_valid_cursor $dbc $db] TRUE
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) $str
- } else {
- set key $str
- }
- set ret [eval {$dbc put} $pflags {-keyfirst $key $str}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good get [lindex [lindex $ret 0] 1] $str
- incr count
- }
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- puts "\tTest036.a: put/get loop KEYLAST"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor [is_valid_cursor $dbc $db] TRUE
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) $str
- } else {
- set key $str
- }
- set ret [eval {$dbc put} $txn $pflags {-keylast $key $str}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good get [lindex [lindex $ret 0] 1] $str
- incr count
- }
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest036.c: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid $i
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
-}
-
-# Check function for test036; keys and data are identical
-proc test036.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
-
-proc test036_recno.check { key data } {
- global dict
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
-}
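
The cursor flags test036 exercises matter only for duplicates; on a nonexistent key, as tested here, -keyfirst and -keylast both behave like a plain put. A minimal sketch (names illustrative):

    # Sketch of cursor puts with -keyfirst/-keylast, from test036.
    set db [berkdb_open -create -mode 0644 -btree keyflags.db]
    set dbc [$db cursor]
    set ret [$dbc put -keyfirst newkey newdata]    ;# key absent: plain insert
    set ret [$dbc put -keylast other otherdata]
    $dbc close
    $db close
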
diff --git a/storage/bdb/test/test037.tcl b/storage/bdb/test/test037.tcl
deleted file mode 100644
index c571ffa3e9d..00000000000
--- a/storage/bdb/test/test037.tcl
+++ /dev/null
@@ -1,196 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test037.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test037
-# TEST Test DB_RMW
-proc test037 { method {nentries 100} args } {
- global encrypt
-
- source ./include.tcl
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then skip this test. It needs its own.
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test037 skipping for env $env"
- return
- }
-
- puts "Test037: RMW $method"
-
- set args [convert_args $method $args]
- set encargs ""
- set args [split_encargs $args encargs]
- set omethod [convert_method $method]
-
- # Create the database
- env_cleanup $testdir
- set testfile test037.db
-
- set local_env \
- [eval {berkdb_env -create -mode 0644 -txn} $encargs -home $testdir]
- error_check_good dbenv [is_valid_env $local_env] TRUE
-
- set db [eval {berkdb_open \
- -env $local_env -create -mode 0644 $omethod} $args {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
- set count 0
-
- set pflags ""
- set gflags ""
- set txn ""
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
-
- puts "\tTest037.a: Creating database"
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good get \
- [lindex [lindex $ret 0] 1] [pad_data $method $str]
- incr count
- }
- close $did
- error_check_good dbclose [$db close] 0
- error_check_good envclose [$local_env close] 0
-
- puts "\tTest037.b: Setting up environments"
-
- # Open local environment
- set env_cmd [concat berkdb_env -create -txn $encargs -home $testdir]
- set local_env [eval $env_cmd]
- error_check_good dbenv [is_valid_env $local_env] TRUE
-
- # Open local transaction
- set local_txn [$local_env txn]
- error_check_good txn_open [is_valid_txn $local_txn $local_env] TRUE
-
- # Open remote environment
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
-
- set remote_env [send_cmd $f1 $env_cmd]
- error_check_good remote:env_open [is_valid_env $remote_env] TRUE
-
- # Open remote transaction
- set remote_txn [send_cmd $f1 "$remote_env txn"]
- error_check_good \
- remote:txn_open [is_valid_txn $remote_txn $remote_env] TRUE
-
- # Now try put test without RMW. Gets on one site should not
- # lock out gets on another.
-
- # Open databases and dictionary
- puts "\tTest037.c: Opening databases"
- set did [open $dict]
- set rkey 0
-
- set db [berkdb_open -auto_commit -env $local_env $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set rdb [send_cmd $f1 \
- "berkdb_open -auto_commit -env $remote_env -mode 0644 $testfile"]
- error_check_good remote:dbopen [is_valid_db $rdb] TRUE
-
- puts "\tTest037.d: Testing without RMW"
-
- # Now, get a key and try to "get" it from both DBs.
- error_check_bad "gets on new open" [gets $did str] -1
- incr rkey
- if { [is_record_based $method] == 1 } {
- set key $rkey
- } else {
- set key $str
- }
-
- set rec [eval {$db get -txn $local_txn} $gflags {$key}]
- error_check_good local_get [lindex [lindex $rec 0] 1] \
- [pad_data $method $str]
-
- set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
- error_check_good remote_send $r 0
-
- # Now sleep before releasing local record lock
- tclsleep 5
- error_check_good local_commit [$local_txn commit] 0
-
- # Now get the remote result
- set remote_time [rcv_result $f1]
- error_check_good no_rmw_get:remote_time [expr $remote_time <= 1] 1
-
- # Commit the remote
- set r [send_cmd $f1 "$remote_txn commit"]
- error_check_good remote_commit $r 0
-
- puts "\tTest037.e: Testing with RMW"
-
- # Open local transaction
- set local_txn [$local_env txn]
- error_check_good \
- txn_open [is_valid_txn $local_txn $local_env] TRUE
-
- # Open remote transaction
- set remote_txn [send_cmd $f1 "$remote_env txn"]
- error_check_good remote:txn_open \
- [is_valid_txn $remote_txn $remote_env] TRUE
-
- # Now, get a key and try to "get" it from both DBs.
- error_check_bad "gets on new open" [gets $did str] -1
- incr rkey
- if { [is_record_based $method] == 1 } {
- set key $rkey
- } else {
- set key $str
- }
-
- set rec [eval {$db get -txn $local_txn -rmw} $gflags {$key}]
- error_check_good \
- local_get [lindex [lindex $rec 0] 1] [pad_data $method $str]
-
- set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
- error_check_good remote_send $r 0
-
- # Now sleep before releasing local record lock
- tclsleep 5
- error_check_good local_commit [$local_txn commit] 0
-
- # Now get the remote result
- set remote_time [rcv_result $f1]
- error_check_good rmw_get:remote_time [expr $remote_time > 4] 1
-
- # Commit the remote
- set r [send_cmd $f1 "$remote_txn commit"]
- error_check_good remote_commit $r 0
-
- # Close everything up: remote first
- set r [send_cmd $f1 "$rdb close"]
- error_check_good remote_db_close $r 0
-
- set r [send_cmd $f1 "$remote_env close"]
-
- # Close locally
- error_check_good db_close [$db close] 0
- $local_env close
- close $did
- close $f1
-}
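
The point of -rmw in test037 is lock acquisition order: a get with -rmw takes a write lock immediately, so the later put in the same transaction needs no read-to-write upgrade, and a concurrent reader blocks until commit (the multi-second delay the test measures). A minimal single-process sketch of the local side (environment home and names illustrative):

    # Sketch of a read-modify-write get, from test037.
    set env [berkdb_env -create -txn -home $testdir]
    set db [berkdb_open -auto_commit -env $env -create -mode 0644 -btree rmw.db]
    set t [$env txn]
    set rec [$db get -txn $t -rmw somekey]      ;# write-locks the record up front
    set ret [$db put -txn $t somekey newval]    ;# no lock upgrade needed here
    $t commit
    $db close
    $env close
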
diff --git a/storage/bdb/test/test038.tcl b/storage/bdb/test/test038.tcl
deleted file mode 100644
index eaf934f5d13..00000000000
--- a/storage/bdb/test/test038.tcl
+++ /dev/null
@@ -1,227 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test038.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test038
-# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
-# TEST
-# TEST Use the first 10,000 entries from the dictionary. Insert each with
-# TEST self as key and "ndups" duplicates. For the data field, prepend the
-# TEST letters of the alphabet in a random order so we force the duplicate
-# TEST sorting code to do something. By setting ndups large, we can make
-# TEST this an off-page test
-# TEST
-# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
-# TEST each dup in the file explicitly. Then remove each duplicate and try
-# TEST the retrieval again.
-proc test038 { method {nentries 10000} {ndups 5} {tnum "038"} args } {
- global alphabet
- global rand_init
- source ./include.tcl
-
- berkdb srand $rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set checkdb $testdir/checkdb.db
- set env NULL
- } else {
- set testfile test$tnum.db
- set checkdb checkdb.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
- puts "Test$tnum: \
- $method ($args) $nentries small sorted dup key/data pairs"
- set db [eval {berkdb_open -create -mode 0644 \
- $omethod -dup -dupsort} $args {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set check_db [eval {berkdb_open \
- -create -mode 0644 -hash} $args {$checkdb}]
- error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
- puts "\tTest$tnum.a: Put/get loop"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
- while { [gets $did str] != -1 && $count < $nentries } {
- set dups ""
- for { set i 1 } { $i <= $ndups } { incr i } {
- set pref \
- [string index $alphabet [berkdb random_int 0 25]]
- set pref $pref[string \
- index $alphabet [berkdb random_int 0 25]]
- while { [string first $pref $dups] != -1 } {
- set pref [string toupper $pref]
- if { [string first $pref $dups] != -1 } {
- set pref [string index $alphabet \
- [berkdb random_int 0 25]]
- set pref $pref[string index $alphabet \
- [berkdb random_int 0 25]]
- }
- }
- if { [string length $dups] == 0 } {
- set dups $pref
- } else {
- set dups "$dups $pref"
- }
- set datastr $pref:$str
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good put $ret 0
- }
- set ret [eval {$check_db put} \
- $txn $pflags {$str [chop_data $method $dups]}]
- error_check_good checkdb_put $ret 0
-
- # Now retrieve all the keys matching this key
- set x 0
- set lastdup ""
- for {set ret [$dbc get -set $str]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -nextdup] } {
- set k [lindex [lindex $ret 0] 0]
- if { [string compare $k $str] != 0 } {
- break
- }
- set datastr [lindex [lindex $ret 0] 1]
- if {[string length $datastr] == 0} {
- break
- }
- if {[string compare $lastdup $datastr] > 0} {
- error_check_good sorted_dups($lastdup,$datastr)\
- 0 1
- }
- incr x
- set lastdup $datastr
- }
- error_check_good "Test$tnum:ndups:$str" $x $ndups
- incr count
- }
- error_check_good cursor_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- close $did
-
- # Now check the duplicates, then delete, then recheck
- puts "\tTest$tnum.b: Checking and Deleting duplicates"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
- set check_c [eval {$check_db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
-
- for {set ndx 0} {$ndx < $ndups} {incr ndx} {
- for {set ret [$check_c get -first]} \
- {[llength $ret] != 0} \
- {set ret [$check_c get -next] } {
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_bad data_check:$d [string length $d] 0
-
- set nn [expr $ndx * 3]
- set pref [string range $d $nn [expr $nn + 1]]
- set data $pref:$k
- set ret [$dbc get -get_both $k $data]
- error_check_good \
- get_both_key:$k [lindex [lindex $ret 0] 0] $k
- error_check_good \
- get_both_data:$k [lindex [lindex $ret 0] 1] $data
-
- set ret [$dbc get -get_both_range $k $pref]
- error_check_good \
- get_both_key:$k [lindex [lindex $ret 0] 0] $k
- error_check_good \
- get_both_data:$k [lindex [lindex $ret 0] 1] $data
-
- set ret [$dbc del]
- error_check_good del $ret 0
-
- set ret [eval {$db get} $txn {-get_both $k $data}]
- error_check_good error_case:$k [llength $ret] 0
-
- # We should either not find anything (if deleting the
- # largest duplicate in the set) or a duplicate that
- # sorts larger than the one we deleted.
- set ret [$dbc get -get_both_range $k $pref]
- if { [llength $ret] != 0 } {
- set datastr [lindex [lindex $ret 0] 1]
- if {[string compare \
- $pref [lindex [lindex $ret 0] 1]] >= 0} {
- error_check_good \
- error_case_range:sorted_dups($pref,$datastr) 0 1
- }
- }
-
- if {$ndx != 0} {
- set n [expr ($ndx - 1) * 3]
- set pref [string range $d $n [expr $n + 1]]
- set data $pref:$k
- set ret \
- [eval {$db get} $txn {-get_both $k $data}]
- error_check_good error_case:$k [llength $ret] 0
- }
- }
- }
-
- error_check_good check_c:close [$check_c close] 0
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- error_check_good check_db:close [$check_db close] 0
- error_check_good db_close [$db close] 0
-}
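
test038's deletion check rests on a useful asymmetry: after a duplicate is removed, -get_both on that exact pair returns nothing, while -get_both_range with the deleted prefix falls through to the next larger surviving duplicate. A minimal sketch (names illustrative):

    # Sketch of -get_both/-get_both_range on deleted dups, from test038.
    set db [berkdb_open -create -mode 0644 -dup -dupsort -btree deldup.db]
    foreach d {aa:k bb:k cc:k} { $db put k $d }
    set dbc [$db cursor]
    set ret [$dbc get -get_both k bb:k]        ;# position on the middle dup
    set ret [$dbc del]
    set ret [$db get -get_both k bb:k]         ;# deleted pair: empty list
    set ret [$dbc get -get_both_range k bb]    ;# now finds cc:k instead
    $dbc close
    $db close
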
diff --git a/storage/bdb/test/test039.tcl b/storage/bdb/test/test039.tcl
deleted file mode 100644
index 67b2eaf3361..00000000000
--- a/storage/bdb/test/test039.tcl
+++ /dev/null
@@ -1,211 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test039.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test039
-# TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
-# TEST function.
-# TEST
-# TEST Use the first 10,000 entries from the dictionary. Insert each with
-# TEST self as key and "ndups" duplicates. For the data field, prepend the
-# TEST letters of the alphabet in a random order so we force the duplicate
-# TEST sorting code to do something. By setting ndups large, we can make
-# TEST this an off-page test.
-# TEST
-# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
-# TEST each dup in the file explicitly. Then remove each duplicate and try
-# TEST the retrieval again.
-proc test039 { method {nentries 10000} {ndups 5} {tnum "039"} args } {
- global alphabet
- global rand_init
- source ./include.tcl
-
- berkdb srand $rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set checkdb $testdir/checkdb.db
- set env NULL
- } else {
- set testfile test$tnum.db
- set checkdb checkdb.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- cleanup $testdir $env
-
-	puts "Test$tnum: $method $nentries \
-	    small key/data pairs with $ndups unsorted dups"
-
- set db [eval {berkdb_open -create -mode 0644 \
- $omethod -dup} $args {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set check_db [eval \
- {berkdb_open -create -mode 0644 -hash} $args {$checkdb}]
- error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
- puts "\tTest$tnum.a: Put/get loop"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
- while { [gets $did str] != -1 && $count < $nentries } {
- set dups ""
- for { set i 1 } { $i <= $ndups } { incr i } {
- set pref \
- [string index $alphabet [berkdb random_int 0 25]]
- set pref $pref[string \
- index $alphabet [berkdb random_int 0 25]]
- while { [string first $pref $dups] != -1 } {
- set pref [string toupper $pref]
- if { [string first $pref $dups] != -1 } {
- set pref [string index $alphabet \
- [berkdb random_int 0 25]]
- set pref $pref[string index $alphabet \
- [berkdb random_int 0 25]]
- }
- }
- if { [string length $dups] == 0 } {
- set dups $pref
- } else {
- set dups "$dups $pref"
- }
- set datastr $pref:$str
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good put $ret 0
- }
- set ret [eval {$check_db put} \
- $txn $pflags {$str [chop_data $method $dups]}]
- error_check_good checkdb_put $ret 0
-
- # Now retrieve all the keys matching this key
- set x 0
- set lastdup ""
- for {set ret [$dbc get -set $str]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -nextdup] } {
- set k [lindex [lindex $ret 0] 0]
- if { [string compare $k $str] != 0 } {
- break
- }
- set datastr [lindex [lindex $ret 0] 1]
- if {[string length $datastr] == 0} {
- break
- }
- set xx [expr $x * 3]
- set check_data \
- [string range $dups $xx [expr $xx + 1]]:$k
- error_check_good retrieve $datastr $check_data
- incr x
- }
- error_check_good "Test$tnum:ndups:$str" $x $ndups
- incr count
- }
- error_check_good cursor_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- close $did
-
-	# Now check the duplicates, then delete them and recheck.
-	puts "\tTest$tnum.b: Checking and deleting duplicates"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
- set check_c [eval {$check_db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
-
- for {set ndx 0} {$ndx < $ndups} {incr ndx} {
- for {set ret [$check_c get -first]} \
- {[llength $ret] != 0} \
- {set ret [$check_c get -next] } {
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_bad data_check:$d [string length $d] 0
-
- set nn [expr $ndx * 3]
- set pref [string range $d $nn [expr $nn + 1]]
- set data $pref:$k
- set ret [$dbc get -get_both $k $data]
- error_check_good \
- get_both_key:$k [lindex [lindex $ret 0] 0] $k
- error_check_good \
- get_both_data:$k [lindex [lindex $ret 0] 1] $data
-
- set ret [$dbc del]
- error_check_good del $ret 0
-
- set ret [$dbc get -get_both $k $data]
- error_check_good get_both:$k [llength $ret] 0
-
- set ret [$dbc get -get_both_range $k $data]
- error_check_good get_both_range:$k [llength $ret] 0
-
- if {$ndx != 0} {
- set n [expr ($ndx - 1) * 3]
- set pref [string range $d $n [expr $n + 1]]
- set data $pref:$k
- set ret [$dbc get -get_both $k $data]
- error_check_good error_case:$k [llength $ret] 0
- }
- }
- }
-
- error_check_good check_c:close [$check_c close] 0
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- error_check_good check_db:close [$check_db close] 0
- error_check_good db_close [$db close] 0
-}
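
A hedged sketch of the -set/-nextdup traversal pattern test039 relies on; the database and key names are made up for illustration.

	# Without -dupsort, duplicates come back in insertion order.
	set db [berkdb_open -create -mode 0644 -btree -dup exdup.db]
	foreach d {zz:cat aa:cat mm:cat} {
		$db put cat $d
	}
	set dbc [$db cursor]
	for {set ret [$dbc get -set cat]} {[llength $ret] != 0} \
	    {set ret [$dbc get -nextdup]} {
		# Prints zz:cat, then aa:cat, then mm:cat.
		puts [lindex [lindex $ret 0] 1]
	}
	$dbc close
	$db close
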
diff --git a/storage/bdb/test/test040.tcl b/storage/bdb/test/test040.tcl
deleted file mode 100644
index 61d5cd1f366..00000000000
--- a/storage/bdb/test/test040.tcl
+++ /dev/null
@@ -1,23 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1998-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test040.tcl,v 11.10 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test040
-# TEST Test038 with off-page duplicates
-# TEST DB_GET_BOTH functionality with off-page duplicates.
-proc test040 { method {nentries 10000} args} {
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test040: skipping for specific pagesizes"
- return
- }
- # Test with off-page duplicates
- eval {test038 $method $nentries 20 "040" -pagesize 512} $args
-
- # Test with multiple pages of off-page duplicates
- eval {test038 $method [expr $nentries / 10] 100 "040" -pagesize 512} \
- $args
-}
diff --git a/storage/bdb/test/test041.tcl b/storage/bdb/test/test041.tcl
deleted file mode 100644
index 790ece9437f..00000000000
--- a/storage/bdb/test/test041.tcl
+++ /dev/null
@@ -1,18 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test041.tcl,v 11.9 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test041
-# TEST Test039 with off-page duplicates
-# TEST DB_GET_BOTH functionality with off-page duplicates.
-proc test041 { method {nentries 10000} args} {
- # Test with off-page duplicates
- eval {test039 $method $nentries 20 "041" -pagesize 512} $args
-
- # Test with multiple pages of off-page duplicates
- eval {test039 $method [expr $nentries / 10] 100 "041" -pagesize 512} \
- $args
-}
diff --git a/storage/bdb/test/test042.tcl b/storage/bdb/test/test042.tcl
deleted file mode 100644
index b216ebf27d8..00000000000
--- a/storage/bdb/test/test042.tcl
+++ /dev/null
@@ -1,184 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test042.tcl,v 11.46 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test042
-# TEST Concurrent Data Store test (CDB)
-# TEST
-# TEST Multiprocess DB test; verify that locking is working for the
-# TEST concurrent access method product.
-# TEST
-# TEST Use the first "nentries" words from the dictionary. Insert each with
-# TEST self as key and a fixed, medium length data string. Then fire off
-# TEST multiple processes that bang on the database. Each one should try to
-# TEST read and write random keys. When they rewrite, they'll append their
-# TEST pid to the data string (sometimes doing a rewrite sometimes doing a
-# TEST partial put). Some will use cursors to traverse through a few keys
-# TEST before finding one to write.
-
-proc test042 { method {nentries 1000} args } {
- global encrypt
-
- #
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test042 skipping for env $env"
- return
- }
-
- set args [convert_args $method $args]
- if { $encrypt != 0 } {
- puts "Test042 skipping for security"
- return
- }
- test042_body $method $nentries 0 $args
- test042_body $method $nentries 1 $args
-}
-
-proc test042_body { method nentries alldb args } {
- source ./include.tcl
-
- if { $alldb } {
- set eflag "-cdb -cdb_alldb"
- } else {
- set eflag "-cdb"
- }
- puts "Test042: CDB Test ($eflag) $method $nentries"
-
- # Set initial parameters
- set do_exit 0
- set iter 10000
- set procs 5
-
- # Process arguments
- set oargs ""
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -dir { incr i; set testdir [lindex $args $i] }
- -iter { incr i; set iter [lindex $args $i] }
- -procs { incr i; set procs [lindex $args $i] }
- -exit { set do_exit 1 }
- default { append oargs " " [lindex $args $i] }
- }
- }
-
- # Create the database and open the dictionary
- set basename test042
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
-
- env_cleanup $testdir
-
- set env [eval {berkdb_env -create} $eflag -home $testdir]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- # Env is created, now set up database
- test042_dbinit $env $nentries $method $oargs $basename.0.db
- if { $alldb } {
- for { set i 1 } {$i < $procs} {incr i} {
- test042_dbinit $env $nentries $method $oargs \
- $basename.$i.db
- }
- }
-
-	# Remove old mpools and open/create the lock and mpool regions.
- error_check_good env:close:$env [$env close] 0
- set ret [berkdb envremove -home $testdir]
- error_check_good env_remove $ret 0
-
- set env [eval {berkdb_env \
- -create -cachesize {0 1048576 1}} $eflag -home $testdir]
- error_check_good dbenv [is_valid_widget $env env] TRUE
-
- if { $do_exit == 1 } {
- return
- }
-
- # Now spawn off processes
- berkdb debug_check
- puts "\tTest042.b: forking off $procs children"
- set pidlist {}
-
- for { set i 0 } {$i < $procs} {incr i} {
- if { $alldb } {
- set tf $basename.$i.db
- } else {
- set tf $basename.0.db
- }
- puts "exec $tclsh_path $test_path/wrap.tcl \
- mdbscript.tcl $testdir/test042.$i.log \
- $method $testdir $tf $nentries $iter $i $procs &"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- mdbscript.tcl $testdir/test042.$i.log $method \
- $testdir $tf $nentries $iter $i $procs &]
- lappend pidlist $p
- }
- puts "Test042: $procs independent processes now running"
- watch_procs $pidlist
-
- # Make sure we haven't added or lost any entries.
- set dblist [glob $testdir/$basename.*.db]
- foreach file $dblist {
- set tf [file tail $file]
- set db [eval {berkdb_open -env $env $tf}]
- set statret [$db stat]
- foreach pair $statret {
- set fld [lindex $pair 0]
- if { [string compare $fld {Number of records}] == 0 } {
- set numrecs [lindex $pair 1]
- break
- }
- }
- error_check_good nentries $numrecs $nentries
- error_check_good db_close [$db close] 0
- }
-
- # Check for test failure
- set errstrings [eval findfail [glob $testdir/test042.*.log]]
- foreach str $errstrings {
- puts "FAIL: error message in log file: $str"
- }
-
- # Test is done, blow away lock and mpool region
- reset_env $env
-}
-
-proc test042_dbinit { env nentries method oargs tf } {
- global datastr
- source ./include.tcl
-
- set omethod [convert_method $method]
- set db [eval {berkdb_open -env $env -create \
- -mode 0644 $omethod} $oargs $tf]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put each key/data pair
- puts "\tTest042.a: put loop $tf"
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put:$db $ret 0
- incr count
- }
- close $did
- error_check_good close:$db [$db close] 0
-}
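
A rough sketch of the Concurrent Data Store setup test042 drives, under the same berkdb Tcl API; TESTDIR and the db name are placeholders.

	# -cdb serializes writers per database; with -cdb_alldb the
	# write lock covers every database in the environment.
	set env [berkdb_env -create -cdb -home TESTDIR]
	set db [berkdb_open -env $env -create -mode 0644 -btree excdb.db]
	$db put key1 data1
	puts [$db get key1]
	$db close
	$env close
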
diff --git a/storage/bdb/test/test043.tcl b/storage/bdb/test/test043.tcl
deleted file mode 100644
index bbb934ccef1..00000000000
--- a/storage/bdb/test/test043.tcl
+++ /dev/null
@@ -1,192 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test043.tcl,v 11.19 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test043
-# TEST Recno renumbering and implicit creation test
-# TEST Test the Record number implicit creation and renumbering options.
-proc test043 { method {nentries 10000} args} {
- source ./include.tcl
-
- set do_renumber [is_rrecno $method]
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test043: $method ($args)"
-
- if { [is_record_based $method] != 1 } {
- puts "Test043 skipping for method $method"
- return
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test043.db
- set env NULL
- } else {
- set testfile test043.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- # Create the database
- set db [eval {berkdb_open -create -mode 0644} $args \
- {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags " -recno"
- set txn ""
-
- # First test implicit creation and retrieval
- set count 1
- set interval 5
- if { $nentries < $interval } {
- set nentries [expr $interval + 1]
- }
- puts "\tTest043.a: insert keys at $interval record intervals"
- while { $count <= $nentries } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$count [chop_data $method $count]}]
- error_check_good "$db put $count" $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- set last $count
- incr count $interval
- }
-
- puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good "$db cursor" [is_valid_cursor $dbc $db] TRUE
-
- set check 1
- for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
- set rec [$dbc get -next] } {
- set k [lindex [lindex $rec 0] 0]
- set d [pad_data $method [lindex [lindex $rec 0] 1]]
- error_check_good "$dbc get key==data" [pad_data $method $k] $d
- error_check_good "$dbc get sequential" $k $check
- if { $k > $nentries } {
- error_check_good "$dbc get key too large" $k $nentries
- }
- incr check $interval
- }
-
- # Now make sure that we get DB_KEYEMPTY for non-existent keys
- puts "\tTest043.c: Retrieve non-existent keys"
- global errorInfo
-
- set check 1
- for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
- set rec [$dbc get -next] } {
- set k [lindex [lindex $rec 0] 0]
-
- set ret [eval {$db get} $txn $gflags {[expr $k + 1]}]
- error_check_good "$db \
- get [expr $k + 1]" $ret [list]
-
- incr check $interval
- # Make sure we don't do a retrieve past the end of file
- if { $check >= $last } {
- break
- }
- }
-
- # Now try deleting and make sure the right thing happens.
- puts "\tTest043.d: Delete tests"
- set rec [$dbc get -first]
- error_check_bad "$dbc get -first" [llength $rec] 0
- error_check_good "$dbc get -first key" [lindex [lindex $rec 0] 0] 1
- error_check_good "$dbc get -first data" \
- [lindex [lindex $rec 0] 1] [pad_data $method 1]
-
- # Delete the first item
- error_check_good "$dbc del" [$dbc del] 0
-
- # Retrieving 1 should always fail
- set ret [eval {$db get} $txn $gflags {1}]
- error_check_good "$db get 1" $ret [list]
-
- # Now, retrieving other keys should work; keys will vary depending
- # upon renumbering.
- if { $do_renumber == 1 } {
- set count [expr 0 + $interval]
- set max [expr $nentries - 1]
- } else {
- set count [expr 1 + $interval]
- set max $nentries
- }
-
- while { $count <= $max } {
- set rec [eval {$db get} $txn $gflags {$count}]
- if { $do_renumber == 1 } {
- set data [expr $count + 1]
- } else {
- set data $count
- }
- error_check_good "$db get $count" \
- [pad_data $method $data] [lindex [lindex $rec 0] 1]
- incr count $interval
- }
- set max [expr $count - $interval]
-
- puts "\tTest043.e: Verify LAST/PREV functionality"
- set count $max
- for { set rec [$dbc get -last] } { [llength $rec] != 0 } {
- set rec [$dbc get -prev] } {
- set k [lindex [lindex $rec 0] 0]
- set d [lindex [lindex $rec 0] 1]
- if { $do_renumber == 1 } {
- set data [expr $k + 1]
- } else {
- set data $k
- }
- error_check_good \
- "$dbc get key==data" [pad_data $method $data] $d
- error_check_good "$dbc get sequential" $k $count
- if { $k > $nentries } {
- error_check_good "$dbc get key too large" $k $nentries
- }
- set count [expr $count - $interval]
- if { $count < 1 } {
- break
- }
- }
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
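
An illustrative sketch of the implicit-creation behavior test043 checks; this is not part of the suite, and the file name is arbitrary.

	set db [berkdb_open -create -mode 0644 -recno exrec.db]
	# Writing record 5 into an empty recno db implicitly creates 1-4.
	$db put 5 five
	# The implicit records are KEYEMPTY: the get returns an empty list.
	puts [llength [$db get -recno 3]]	;# 0
	puts [$db get -recno 5]			;# {5 five}
	$db close
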
diff --git a/storage/bdb/test/test044.tcl b/storage/bdb/test/test044.tcl
deleted file mode 100644
index 22d56a4345e..00000000000
--- a/storage/bdb/test/test044.tcl
+++ /dev/null
@@ -1,252 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test044.tcl,v 11.35 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test044
-# TEST Small system integration tests
-# TEST Test proper functioning of the checkpoint daemon,
-# TEST recovery, transactions, etc.
-# TEST
-# TEST System integration DB test: verify that locking, recovery, checkpoint,
-# TEST and all the other utilities basically work.
-# TEST
-# TEST The test consists of $nprocs processes operating on $nfiles files. A
-# TEST transaction consists of adding the same key/data pair to some random
-# TEST number of these files. We generate a bimodal distribution in key size
-# TEST with 70% of the keys being small (1-10 characters) and the remaining
-# TEST 30% of the keys being large (uniform distribution about mean $key_avg).
-# TEST If we generate a key, we first check to make sure that the key is not
-# TEST already in the dataset. If it is, we do a lookup.
-#
-# XXX
-# This test uses grow-only files currently!
-proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
- source ./include.tcl
- global encrypt
- global rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- berkdb srand $rand_init
-
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test044 skipping for env $env"
- return
- }
- if { $encrypt != 0 } {
- puts "Test044 skipping for security"
- return
- }
-
- puts "Test044: system integration test db $method $nprocs processes \
- on $nfiles files"
-
- # Parse options
- set otherargs ""
- set key_avg 10
- set data_avg 20
- set do_exit 0
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -key_avg { incr i; set key_avg [lindex $args $i] }
- -data_avg { incr i; set data_avg [lindex $args $i] }
- -testdir { incr i; set testdir [lindex $args $i] }
- -x.* { set do_exit 1 }
- default {
- lappend otherargs [lindex $args $i]
- }
- }
- }
-
- if { $cont == 0 } {
- # Create the database and open the dictionary
- env_cleanup $testdir
-
- # Create an environment
- puts "\tTest044.a: creating environment and $nfiles files"
- set dbenv [berkdb_env -create -txn -home $testdir]
- error_check_good env_open [is_valid_env $dbenv] TRUE
-
- # Create a bunch of files
- set m $method
-
- for { set i 0 } { $i < $nfiles } { incr i } {
- if { $method == "all" } {
- switch [berkdb random_int 1 2] {
- 1 { set m -btree }
- 2 { set m -hash }
- }
- } else {
- set m $omethod
- }
-
- set db [eval {berkdb_open -env $dbenv -create \
- -mode 0644 $m} $otherargs {test044.$i.db}]
- error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
- }
- }
-
- # Close the environment
-	error_check_good env_close [$dbenv close] 0
-
- if { $do_exit == 1 } {
- return
- }
-
- # Database is created, now fork off the kids.
- puts "\tTest044.b: forking off $nprocs processes and utilities"
- set cycle 1
- set ncycles 3
- while { $cycle <= $ncycles } {
- set dbenv [berkdb_env -create -txn -home $testdir]
- error_check_good env_open [is_valid_env $dbenv] TRUE
-
- # Fire off deadlock detector and checkpointer
- puts "Beginning cycle $cycle"
- set ddpid [exec $util_path/db_deadlock -h $testdir -t 5 &]
- set cppid [exec $util_path/db_checkpoint -h $testdir -p 2 &]
- puts "Deadlock detector: $ddpid Checkpoint daemon $cppid"
-
- set pidlist {}
- for { set i 0 } {$i < $nprocs} {incr i} {
- set p [exec $tclsh_path \
- $test_path/sysscript.tcl $testdir \
- $nfiles $key_avg $data_avg $omethod \
- >& $testdir/test044.$i.log &]
- lappend pidlist $p
- }
- set sleep [berkdb random_int 300 600]
- puts \
-"[timestamp] $nprocs processes running $pidlist for $sleep seconds"
- tclsleep $sleep
-
- # Now simulate a crash
- puts "[timestamp] Crashing"
-
- #
- # The environment must remain open until this point to get
- # proper sharing (using the paging file) on Win/9X. [#2342]
- #
- error_check_good env_close [$dbenv close] 0
-
- tclkill $ddpid
- tclkill $cppid
-
- foreach p $pidlist {
- tclkill $p
- }
-
- # Check for test failure
- set errstrings [eval findfail [glob $testdir/test044.*.log]]
- foreach str $errstrings {
- puts "FAIL: error message in log file: $str"
- }
-
- # Now run recovery
- test044_verify $testdir $nfiles
- incr cycle
- }
-}
-
-proc test044_usage { } {
-	# Brace-quote the strings so the bracketed options are not
-	# command-substituted by Tcl.
-	puts -nonewline {test044 method nentries [-d directory] [-i iterations]}
-	puts { [-p procs] -x}
-}
-
-proc test044_verify { dir nfiles } {
- source ./include.tcl
-
- # Save everything away in case something breaks
-# for { set f 0 } { $f < $nfiles } {incr f} {
-# file copy -force $dir/test044.$f.db $dir/test044.$f.save1
-# }
-# foreach f [glob $dir/log.*] {
-# if { [is_substr $f save] == 0 } {
-# file copy -force $f $f.save1
-# }
-# }
-
- # Run recovery and then read through all the database files to make
- # sure that they all look good.
-
- puts "\tTest044.verify: Running recovery and verifying file contents"
- set stat [catch {exec $util_path/db_recover -h $dir} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- }
-
- # Save everything away in case something breaks
-# for { set f 0 } { $f < $nfiles } {incr f} {
-# file copy -force $dir/test044.$f.db $dir/test044.$f.save2
-# }
-# foreach f [glob $dir/log.*] {
-# if { [is_substr $f save] == 0 } {
-# file copy -force $f $f.save2
-# }
-# }
-
- for { set f 0 } { $f < $nfiles } { incr f } {
- set db($f) [berkdb_open $dir/test044.$f.db]
- error_check_good $f:dbopen [is_valid_db $db($f)] TRUE
-
- set cursors($f) [$db($f) cursor]
- error_check_bad $f:cursor_open $cursors($f) NULL
- error_check_good \
- $f:cursor_open [is_substr $cursors($f) $db($f)] 1
- }
-
- for { set f 0 } { $f < $nfiles } { incr f } {
- for {set d [$cursors($f) get -first] } \
- { [string length $d] != 0 } \
- { set d [$cursors($f) get -next] } {
-
- set k [lindex [lindex $d 0] 0]
- set d [lindex [lindex $d 0] 1]
-
- set flist [zero_list $nfiles]
- set r $d
- while { [set ndx [string first : $r]] != -1 } {
- set fnum [string range $r 0 [expr $ndx - 1]]
- if { [lindex $flist $fnum] == 0 } {
- set fl "-set"
- } else {
- set fl "-next"
- }
-
- if { $fl != "-set" || $fnum != $f } {
- if { [string compare $fl "-set"] == 0} {
- set full [$cursors($fnum) \
- get -set $k]
- } else {
- set full [$cursors($fnum) \
- get -next]
- }
- set key [lindex [lindex $full 0] 0]
- set rec [lindex [lindex $full 0] 1]
- error_check_good \
- $f:dbget_$fnum:key $key $k
- error_check_good \
- $f:dbget_$fnum:data $rec $d
- }
-
- set flist [lreplace $flist $fnum $fnum 1]
- incr ndx
- set r [string range $r $ndx end]
- }
- }
- }
-
- for { set f 0 } { $f < $nfiles } { incr f } {
- error_check_good $cursors($f) [$cursors($f) close] 0
- error_check_good db_close:$f [$db($f) close] 0
- }
-}
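
A simplified sketch of the transactional writes the sysscript.tcl children perform, which db_recover later replays or rolls back in test044_verify; the env path and names are illustrative.

	set env [berkdb_env -create -txn -home TESTDIR]
	set db [berkdb_open \
	    -env $env -auto_commit -create -mode 0644 -btree ex44.db]
	set t [$env txn]
	$db put -txn $t somekey somedata
	# A crash after this commit is exactly the state that
	# db_recover repairs before the verify pass reads the files.
	$t commit
	$db close
	$env close
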
diff --git a/storage/bdb/test/test045.tcl b/storage/bdb/test/test045.tcl
deleted file mode 100644
index 2b4c517ca86..00000000000
--- a/storage/bdb/test/test045.tcl
+++ /dev/null
@@ -1,126 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test045.tcl,v 11.27 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test045
-# TEST Small random tester
-# TEST Runs a number of random add/delete/retrieve operations.
-# TEST Tests both successful conditions and error conditions.
-# TEST
-# TEST Run the random db tester on the specified access method.
-#
-# Options are:
-# -adds <maximum number of keys before you disable adds>
-# -cursors <number of cursors>
-# -dataavg <average data size>
-# -delete <minimum number of keys before you disable deletes>
-# -dups <allow duplicates in file>
-# -errpct <Induce errors errpct of the time>
-# -init <initial number of entries in database>
-# -keyavg <average key size>
-proc test045 { method {nops 10000} args } {
- source ./include.tcl
- global encrypt
-
- #
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test045 skipping for env $env"
- return
- }
- set args [convert_args $method $args]
- if { $encrypt != 0 } {
- puts "Test045 skipping for security"
- return
- }
- set omethod [convert_method $method]
-
- puts "Test045: Random tester on $method for $nops operations"
-
- # Set initial parameters
- set adds [expr $nops * 10]
- set cursors 5
- set dataavg 40
- set delete $nops
- set dups 0
- set errpct 0
- set init 0
- if { [is_record_based $method] == 1 } {
- set keyavg 10
- } else {
- set keyavg 25
- }
-
- # Process arguments
- set oargs ""
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -adds { incr i; set adds [lindex $args $i] }
- -cursors { incr i; set cursors [lindex $args $i] }
- -dataavg { incr i; set dataavg [lindex $args $i] }
- -delete { incr i; set delete [lindex $args $i] }
- -dups { incr i; set dups [lindex $args $i] }
- -errpct { incr i; set errpct [lindex $args $i] }
- -init { incr i; set init [lindex $args $i] }
- -keyavg { incr i; set keyavg [lindex $args $i] }
- -extent { incr i;
- lappend oargs "-extent" "100" }
- default { lappend oargs [lindex $args $i] }
- }
- }
-
-	# Create the database and initialize it.
- set root $testdir/test045
- set f $root.db
- env_cleanup $testdir
-
- # Run the script with 3 times the number of initial elements to
- # set it up.
- set db [eval {berkdb_open \
- -create -mode 0644 $omethod} $oargs {$f}]
- error_check_good dbopen:$f [is_valid_db $db] TRUE
-
- set r [$db close]
- error_check_good dbclose:$f $r 0
-
- # We redirect standard out, but leave standard error here so we
- # can see errors.
-
- puts "\tTest045.a: Initializing database"
- if { $init != 0 } {
- set n [expr 3 * $init]
- exec $tclsh_path \
- $test_path/dbscript.tcl $method $f $n \
- 1 $init $n $keyavg $dataavg $dups 0 -1 \
- > $testdir/test045.init
- }
- # Check for test failure
- set initerrs [findfail $testdir/test045.init]
- foreach str $initerrs {
- puts "FAIL: error message in .init file: $str"
- }
-
-	puts "\tTest045.b: Now firing off random dbscript, running:"
- # Now the database is initialized, run a test
- puts "$tclsh_path\
- $test_path/dbscript.tcl $method $f $nops $cursors $delete $adds \
- $keyavg $dataavg $dups $errpct > $testdir/test045.log"
-
- exec $tclsh_path \
- $test_path/dbscript.tcl $method $f \
- $nops $cursors $delete $adds $keyavg \
- $dataavg $dups $errpct \
- > $testdir/test045.log
-
- # Check for test failure
- set logerrs [findfail $testdir/test045.log]
- foreach str $logerrs {
- puts "FAIL: error message in log file: $str"
- }
-}
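
An example invocation, for illustration only: the switch above accepts these overrides, e.g. a shorter run with more cursors and induced errors.

	test045 btree 2000 -cursors 10 -errpct 5 -dups 1 -init 100
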
diff --git a/storage/bdb/test/test046.tcl b/storage/bdb/test/test046.tcl
deleted file mode 100644
index 63d0ec7c486..00000000000
--- a/storage/bdb/test/test046.tcl
+++ /dev/null
@@ -1,813 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test046.tcl,v 11.36 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test046
-# TEST Overwrite test of small/big key/data with cursor checks.
-proc test046 { method args } {
- global alphabet
- global errorInfo
- global errorCode
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "\tTest046: Overwrite test with cursor and small/big key/data."
- puts "\tTest046:\t$method $args"
-
- if { [is_rrecno $method] == 1} {
- puts "\tTest046: skipping for method $method."
- return
- }
-
- set key "key"
- set data "data"
- set txn ""
- set flags ""
-
- if { [is_record_based $method] == 1} {
- set key ""
- }
-
- puts "\tTest046: Create $method database."
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test046
- set env NULL
- } else {
- set testfile test046
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- set oflags "-create -mode 0644 $args $omethod"
- set db [eval {berkdb_open} $oflags $testfile.a.db]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # keep nkeys even
- set nkeys 20
-
- # Fill page w/ small key/data pairs
- puts "\tTest046: Fill page with $nkeys small key/data pairs."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- if { [is_record_based $method] == 1} {
- set ret [eval {$db put} $txn {$i $data$i}]
- } elseif { $i < 10 } {
- set ret [eval {$db put} $txn [set key]00$i \
- [set data]00$i]
- } elseif { $i < 100 } {
- set ret [eval {$db put} $txn [set key]0$i \
- [set data]0$i]
- } else {
- set ret [eval {$db put} $txn {$key$i $data$i}]
- }
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # open curs to db
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
- # get db order of keys
- for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
- set ret [$dbc get -next]} {
- set key_set($i) [lindex [lindex $ret 0] 0]
- set data_set($i) [lindex [lindex $ret 0] 1]
- incr i
- }
-
- puts "\tTest046.a: Deletes by key."
- puts "\t\tTest046.a.1: Get data with SET, then delete before cursor."
- # get key in middle of page, call this the nth set curr to it
- set i [expr $nkeys/2]
- set ret [$dbc get -set $key_set($i)]
- error_check_bad dbc_get:set [llength $ret] 0
- set curr $ret
-
- # delete before cursor(n-1), make sure it is gone
- set i [expr $i - 1]
- error_check_good db_del [eval {$db del} $txn {$key_set($i)}] 0
-
- # use set_range to get first key starting at n-1, should
- # give us nth--but only works for btree
- if { [is_btree $method] == 1 } {
- set ret [$dbc get -set_range $key_set($i)]
- } else {
- if { [is_record_based $method] == 1 } {
- set ret [$dbc get -set $key_set($i)]
- error_check_good \
- dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
- #error_check_good \
- # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
- #error_check_good \
- # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
- } else {
- set ret [$dbc get -set $key_set($i)]
- error_check_good dbc_get:deleted [llength $ret] 0
- }
- set ret [$dbc get -set $key_set([incr i])]
- incr i -1
- }
- error_check_bad dbc_get:set(R)(post-delete) [llength $ret] 0
- error_check_good dbc_get(match):set $ret $curr
-
- puts "\t\tTest046.a.2: Delete cursor item by key."
- # nth key, which cursor should be on now
- set i [incr i]
- set ret [eval {$db del} $txn {$key_set($i)}]
- error_check_good db_del $ret 0
-
- # this should return n+1 key/data, curr has nth key/data
- if { [string compare $omethod "-btree"] == 0 } {
- set ret [$dbc get -set_range $key_set($i)]
- } else {
- if { [is_record_based $method] == 1 } {
- set ret [$dbc get -set $key_set($i)]
- error_check_good \
- dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
- #error_check_good \
- # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
- #error_check_good \
- # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
- } else {
- set ret [$dbc get -set $key_set($i)]
- error_check_good dbc_get:deleted [llength $ret] 0
- }
- set ret [$dbc get -set $key_set([expr $i+1])]
- }
- error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
- error_check_bad dbc_get(no-match):set_range $ret $curr
-
- puts "\t\tTest046.a.3: Delete item after cursor."
- # we'll delete n+2, since we have deleted n-1 and n
- # i still equal to nth, cursor on n+1
- set i [incr i]
- set ret [$dbc get -set $key_set($i)]
- error_check_bad dbc_get:set [llength $ret] 0
- set curr [$dbc get -next]
- error_check_bad dbc_get:next [llength $curr] 0
- set ret [$dbc get -prev]
-	error_check_bad dbc_get:prev [llength $ret] 0
- # delete *after* cursor pos.
- error_check_good db:del [eval {$db del} $txn {$key_set([incr i])}] 0
-
- # make sure item is gone, try to get it
- if { [string compare $omethod "-btree"] == 0} {
- set ret [$dbc get -set_range $key_set($i)]
- } else {
- if { [is_record_based $method] == 1 } {
- set ret [$dbc get -set $key_set($i)]
- error_check_good \
- dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
- #error_check_good \
- # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
- #error_check_good \
- # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
- } else {
- set ret [$dbc get -set $key_set($i)]
- error_check_good dbc_get:deleted [llength $ret] 0
- }
- set ret [$dbc get -set $key_set([expr $i +1])]
- }
- error_check_bad dbc_get:set(_range) [llength $ret] 0
- error_check_bad dbc_get:set(_range) $ret $curr
- error_check_good dbc_get:set [lindex [lindex $ret 0] 0] \
- $key_set([expr $i+1])
-
- puts "\tTest046.b: Deletes by cursor."
- puts "\t\tTest046.b.1: Delete, do DB_NEXT."
- error_check_good dbc:del [$dbc del] 0
- set ret [$dbc get -next]
- error_check_bad dbc_get:next [llength $ret] 0
- set i [expr $i+2]
- # i = n+4
- error_check_good dbc_get:next(match) \
- [lindex [lindex $ret 0] 0] $key_set($i)
-
- puts "\t\tTest046.b.2: Delete, do DB_PREV."
- error_check_good dbc:del [$dbc del] 0
- set ret [$dbc get -prev]
- error_check_bad dbc_get:prev [llength $ret] 0
- set i [expr $i-3]
- # i = n+1 (deleted all in between)
- error_check_good dbc_get:prev(match) \
- [lindex [lindex $ret 0] 0] $key_set($i)
-
- puts "\t\tTest046.b.3: Delete, do DB_CURRENT."
- error_check_good dbc:del [$dbc del] 0
-	# We just deleted, so the current item is KEYEMPTY; the get
-	# returns an empty data field rather than throwing an error.
- set ret [$dbc get -current]
- error_check_good dbc_get:curr:deleted [llength [lindex $ret 1]] 0
- #error_check_good catch:get:current [catch {$dbc get -current} ret] 1
- #error_check_good dbc_get:curr:deleted [is_substr $ret "DB_KEYEMPTY"] 1
-
- puts "\tTest046.c: Inserts (before/after), by key then cursor."
- puts "\t\tTest046.c.1: Insert by key before the cursor."
- # i is at curs pos, i=n+1, we want to go BEFORE
- set i [incr i -1]
- set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
- error_check_good db_put:before $ret 0
-
- puts "\t\tTest046.c.2: Insert by key after the cursor."
- set i [incr i +2]
- set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
- error_check_good db_put:after $ret 0
-
- puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)."
- # cursor is on n+1, we'll change i to match
- set i [incr i -1]
-
- error_check_good dbc:close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db:close [$db close] 0
- if { [is_record_based $method] == 1} {
- puts "\t\tSkipping the rest of test for method $method."
- puts "\tTest046 ($method) complete."
- return
- } else {
- # Reopen without printing __db_errs.
- set db [eval {berkdb_open_noerr} $oflags $testfile.a.db]
- error_check_good dbopen [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor [is_valid_cursor $dbc $db] TRUE
-
- # should fail with EINVAL (deleted cursor)
- set errorCode NONE
- error_check_good catch:put:before 1 \
- [catch {$dbc put -before $data_set($i)} ret]
- error_check_good dbc_put:deleted:before \
- [is_substr $errorCode "EINVAL"] 1
-
- # should fail with EINVAL
- set errorCode NONE
- error_check_good catch:put:after 1 \
- [catch {$dbc put -after $data_set($i)} ret]
- error_check_good dbc_put:deleted:after \
- [is_substr $errorCode "EINVAL"] 1
-
- puts "\t\tTest046.c.4:\
- Insert by cursor before/after existent cursor."
- # can't use before after w/o dup except renumber in recno
- # first, restore an item so they don't fail
- #set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
- #error_check_good db_put $ret 0
-
- #set ret [$dbc get -set $key_set($i)]
- #error_check_bad dbc_get:set [llength $ret] 0
- #set i [incr i -2]
- # i = n - 1
- #set ret [$dbc get -prev]
- #set ret [$dbc put -before $key_set($i) $data_set($i)]
- #error_check_good dbc_put:before $ret 0
- # cursor pos is adjusted to match prev, recently inserted
- #incr i
- # i = n
- #set ret [$dbc put -after $key_set($i) $data_set($i)]
- #error_check_good dbc_put:after $ret 0
- }
-
- # For the next part of the test, we need a db with no dups to test
- # overwrites
- puts "\tTest046.d.0: Cleanup, close db, open new db with no dups."
- error_check_good dbc:close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db:close [$db close] 0
-
- set db [eval {berkdb_open} $oflags $testfile.d.db]
- error_check_good dbopen [is_valid_db $db] TRUE
- # Fill page w/ small key/data pairs
- puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs."
- for { set i 1 } { $i < $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key$i $data$i}]
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- set nkeys 20
-
- # Prepare cursor on item
- set ret [$dbc get -first]
- error_check_bad dbc_get:first [llength $ret] 0
-
- # Prepare unique big/small values for an initial
- # and an overwrite set of key/data
- foreach ptype {init over} {
- foreach size {big small} {
- if { [string compare $size big] == 0 } {
- set key_$ptype$size \
- KEY_$size[repeat alphabet 250]
- set data_$ptype$size \
- DATA_$size[repeat alphabet 250]
- } else {
- set key_$ptype$size \
- KEY_$size[repeat alphabet 10]
- set data_$ptype$size \
- DATA_$size[repeat alphabet 10]
- }
- }
- }
-
- set i 0
- # Do all overwrites for key and cursor
- foreach type {key_over curs_over} {
- # Overwrite (i=initial) four different kinds of pairs
- incr i
- puts "\tTest046.d: Overwrites $type."
- foreach i_pair {\
- {small small} {big small} {small big} {big big} } {
- # Overwrite (w=write) with four different kinds of data
- foreach w_pair {\
- {small small} {big small} {small big} {big big} } {
-
- # we can only overwrite if key size matches
- if { [string compare [lindex \
- $i_pair 0] [lindex $w_pair 0]] != 0} {
- continue
- }
-
- # first write the initial key/data
- set ret [$dbc put -keyfirst \
- key_init[lindex $i_pair 0] \
- data_init[lindex $i_pair 1]]
- error_check_good \
- dbc_put:curr:init:$i_pair $ret 0
- set ret [$dbc get -current]
- error_check_bad dbc_get:curr [llength $ret] 0
- error_check_good dbc_get:curr:data \
- [lindex [lindex $ret 0] 1] \
- data_init[lindex $i_pair 1]
-
- # Now, try to overwrite: dups not supported in
- # this db
- if { [string compare $type key_over] == 0 } {
- puts "\t\tTest046.d.$i: Key\
- Overwrite:($i_pair) by ($w_pair)."
- set ret [eval {$db put} $txn \
- $"key_init[lindex $i_pair 0]" \
- $"data_over[lindex $w_pair 1]"]
- error_check_good \
- dbput:over:i($i_pair):o($w_pair) $ret 0
- # check value
- set ret [eval {$db get} $txn \
- $"key_init[lindex $i_pair 0]"]
- error_check_bad \
- db:get:check [llength $ret] 0
- error_check_good db:get:compare_data \
- [lindex [lindex $ret 0] 1] \
- $"data_over[lindex $w_pair 1]"
- } else {
- # This is a cursor overwrite
- puts \
- "\t\tTest046.d.$i:Curs Overwrite:($i_pair) by ($w_pair)."
- set ret [$dbc put -current \
- $"data_over[lindex $w_pair 1]"]
- error_check_good \
- dbcput:over:i($i_pair):o($w_pair) $ret 0
- # check value
- set ret [$dbc get -current]
- error_check_bad \
- dbc_get:curr [llength $ret] 0
- error_check_good dbc_get:curr:data \
- [lindex [lindex $ret 0] 1] \
- $"data_over[lindex $w_pair 1]"
- }
- } ;# foreach write pair
- } ;# foreach initial pair
- } ;# foreach type big/small
-
- puts "\tTest046.d.3: Cleanup for next part of test."
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- if { [is_rbtree $method] == 1} {
- puts "\tSkipping the rest of Test046 for method $method."
- puts "\tTest046 complete."
- return
- }
-
- puts "\tTest046.e.1: Open db with sorted dups."
- set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e.db]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # keep nkeys even
- set nkeys 20
- set ndups 20
-
- # Fill page w/ small key/data pairs
- puts "\tTest046.e.2:\
- Put $nkeys small key/data pairs and $ndups sorted dups."
- for { set i 0 } { $i < $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- if { $i < 10 } {
- set ret [eval {$db put} $txn [set key]0$i [set data]0$i]
- } else {
- set ret [eval {$db put} $txn {$key$i $data$i}]
- }
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- # open curs to db
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
- # get db order of keys
- for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
- set ret [$dbc get -next]} {
- set key_set($i) [lindex [lindex $ret 0] 0]
- set data_set($i) [lindex [lindex $ret 0] 1]
- incr i
- }
-
- # put 20 sorted duplicates on key in middle of page
- set i [expr $nkeys/2]
- set ret [$dbc get -set $key_set($i)]
- error_check_bad dbc_get:set [llength $ret] 0
-
- set keym $key_set($i)
-
- for { set i 0 } { $i < $ndups } { incr i } {
- if { $i < 10 } {
- set ret [eval {$db put} $txn {$keym DUPLICATE_0$i}]
- } else {
- set ret [eval {$db put} $txn {$keym DUPLICATE_$i}]
- }
- error_check_good db_put:DUP($i) $ret 0
- }
-
- puts "\tTest046.e.3: Check duplicate duplicates"
- set ret [eval {$db put} $txn {$keym DUPLICATE_00}]
- error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1
-
- # get dup ordering
- for {set i 0; set ret [$dbc get -set $keym]} { [llength $ret] != 0} {\
- set ret [$dbc get -nextdup] } {
- set dup_set($i) [lindex [lindex $ret 0] 1]
- incr i
- }
-
- # put cursor on item in middle of dups
- set i [expr $ndups/2]
- set ret [$dbc get -get_both $keym $dup_set($i)]
- error_check_bad dbc_get:get_both [llength $ret] 0
-
- puts "\tTest046.f: Deletes by cursor."
- puts "\t\tTest046.f.1: Delete by cursor, do a DB_NEXT, check cursor."
- set ret [$dbc get -current]
- error_check_bad dbc_get:current [llength $ret] 0
- error_check_good dbc:del [$dbc del] 0
- set ret [$dbc get -next]
- error_check_bad dbc_get:next [llength $ret] 0
- error_check_good \
- dbc_get:nextdup [lindex [lindex $ret 0] 1] $dup_set([incr i])
-
- puts "\t\tTest046.f.2: Delete by cursor, do DB_PREV, check cursor."
- error_check_good dbc:del [$dbc del] 0
- set ret [$dbc get -prev]
- error_check_bad dbc_get:prev [llength $ret] 0
- set i [incr i -2]
- error_check_good dbc_get:prev [lindex [lindex $ret 0] 1] $dup_set($i)
-
- puts "\t\tTest046.f.3: Delete by cursor, do DB_CURRENT, check cursor."
- error_check_good dbc:del [$dbc del] 0
- set ret [$dbc get -current]
- error_check_good dbc_get:current:deleted [llength [lindex $ret 1]] 0
- #error_check_good catch:dbc_get:curr [catch {$dbc get -current} ret] 1
- #error_check_good \
- # dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- # restore deleted keys
- error_check_good db_put:1 [eval {$db put} $txn {$keym $dup_set($i)}] 0
- error_check_good db_put:2 [eval {$db put} $txn \
- {$keym $dup_set([incr i])}] 0
- error_check_good db_put:3 [eval {$db put} $txn \
- {$keym $dup_set([incr i])}] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # tested above
-
- # Reopen database without __db_err, reset cursor
- error_check_good dbclose [$db close] 0
- set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e.db]
- error_check_good dbopen [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- set ret [$dbc get -set $keym]
- error_check_bad dbc_get:set [llength $ret] 0
- set ret2 [$dbc get -current]
- error_check_bad dbc_get:current [llength $ret2] 0
- # match
- error_check_good dbc_get:current/set(match) $ret $ret2
- # right one?
- error_check_good \
- dbc_get:curr/set(matchdup) [lindex [lindex $ret 0] 1] $dup_set(0)
-
- # cursor is on first dup
- set ret [$dbc get -next]
- error_check_bad dbc_get:next [llength $ret] 0
- # now on second dup
- error_check_good dbc_get:next [lindex [lindex $ret 0] 1] $dup_set(1)
- # check cursor
- set ret [$dbc get -current]
- error_check_bad dbc_get:curr [llength $ret] 0
- error_check_good \
- dbcget:curr(compare) [lindex [lindex $ret 0] 1] $dup_set(1)
-
- puts "\tTest046.g: Inserts."
- puts "\t\tTest046.g.1: Insert by key before cursor."
- set i 0
-
- # use "spam" to prevent a duplicate duplicate.
- set ret [eval {$db put} $txn {$keym $dup_set($i)spam}]
- error_check_good db_put:before $ret 0
- # make sure cursor was maintained
- set ret [$dbc get -current]
- error_check_bad dbc_get:curr [llength $ret] 0
- error_check_good \
- dbc_get:current(post-put) [lindex [lindex $ret 0] 1] $dup_set(1)
-
- puts "\t\tTest046.g.2: Insert by key after cursor."
- set i [expr $i + 2]
- # use "eggs" to prevent a duplicate duplicate
- set ret [eval {$db put} $txn {$keym $dup_set($i)eggs}]
- error_check_good db_put:after $ret 0
- # make sure cursor was maintained
- set ret [$dbc get -current]
- error_check_bad dbc_get:curr [llength $ret] 0
- error_check_good \
- dbc_get:curr(post-put,after) [lindex [lindex $ret 0] 1] $dup_set(1)
-
- puts "\t\tTest046.g.3: Insert by curs before/after curs (should fail)."
- # should return EINVAL (dupsort specified)
- error_check_good dbc_put:before:catch \
- [catch {$dbc put -before $dup_set([expr $i -1])} ret] 1
- error_check_good \
- dbc_put:before:deleted [is_substr $errorCode "EINVAL"] 1
- error_check_good dbc_put:after:catch \
- [catch {$dbc put -after $dup_set([expr $i +2])} ret] 1
- error_check_good \
- dbc_put:after:deleted [is_substr $errorCode "EINVAL"] 1
-
- puts "\tTest046.h: Cursor overwrites."
- puts "\t\tTest046.h.1: Test that dupsort disallows current overwrite."
- set ret [$dbc get -set $keym]
- error_check_bad dbc_get:set [llength $ret] 0
- error_check_good \
- catch:dbc_put:curr [catch {$dbc put -current DATA_OVERWRITE} ret] 1
- error_check_good dbc_put:curr:dupsort [is_substr $errorCode EINVAL] 1
-
- puts "\t\tTest046.h.2: New db (no dupsort)."
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- set db [eval {berkdb_open} \
- $oflags -dup $testfile.h.db]
- error_check_good db_open [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- for {set i 0} {$i < $nkeys} {incr i} {
- if { $i < 10 } {
- set ret [eval {$db put} $txn {key0$i datum0$i}]
- error_check_good db_put $ret 0
- } else {
- set ret [eval {$db put} $txn {key$i datum$i}]
- error_check_good db_put $ret 0
- }
- if { $i == 0 } {
- for {set j 0} {$j < $ndups} {incr j} {
- if { $i < 10 } {
- set keyput key0$i
- } else {
- set keyput key$i
- }
- if { $j < 10 } {
- set ret [eval {$db put} $txn \
- {$keyput DUP_datum0$j}]
- } else {
- set ret [eval {$db put} $txn \
- {$keyput DUP_datum$j}]
- }
- error_check_good dbput:dup $ret 0
- }
- }
- }
-
- for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
- set ret [$dbc get -next]} {
- set key_set($i) [lindex [lindex $ret 0] 0]
- set data_set($i) [lindex [lindex $ret 0] 1]
- incr i
- }
-
- for {set i 0; set ret [$dbc get -set key00]} {\
- [llength $ret] != 0} {set ret [$dbc get -nextdup]} {
- set dup_set($i) [lindex [lindex $ret 0] 1]
- incr i
- }
- set i 0
- set keym key0$i
- set ret [$dbc get -set $keym]
- error_check_bad dbc_get:set [llength $ret] 0
- error_check_good \
- dbc_get:set(match) [lindex [lindex $ret 0] 1] $dup_set($i)
-
- set ret [$dbc get -nextdup]
- error_check_bad dbc_get:nextdup [llength $ret] 0
- error_check_good dbc_get:nextdup(match) \
- [lindex [lindex $ret 0] 1] $dup_set([expr $i + 1])
-
- puts "\t\tTest046.h.3: Insert by cursor before cursor (DB_BEFORE)."
- set ret [$dbc put -before BEFOREPUT]
- error_check_good dbc_put:before $ret 0
- set ret [$dbc get -current]
- error_check_bad dbc_get:curr [llength $ret] 0
- error_check_good \
- dbc_get:curr:match [lindex [lindex $ret 0] 1] BEFOREPUT
- # make sure that this is actually a dup w/ dup before
- set ret [$dbc get -prev]
- error_check_bad dbc_get:prev [llength $ret] 0
- error_check_good dbc_get:prev:match \
- [lindex [lindex $ret 0] 1] $dup_set($i)
- set ret [$dbc get -prev]
- # should not be a dup
- error_check_bad dbc_get:prev(no_dup) \
- [lindex [lindex $ret 0] 0] $keym
-
- puts "\t\tTest046.h.4: Insert by cursor after cursor (DB_AFTER)."
- set ret [$dbc get -set $keym]
-
- # delete next 3 when fix
- #puts "[$dbc get -current]\
- # [$dbc get -next] [$dbc get -next] [$dbc get -next] [$dbc get -next]"
- #set ret [$dbc get -set $keym]
-
- error_check_bad dbc_get:set [llength $ret] 0
- set ret [$dbc put -after AFTERPUT]
- error_check_good dbc_put:after $ret 0
- #puts [$dbc get -current]
-
- # delete next 3 when fix
- #set ret [$dbc get -set $keym]
- #puts "[$dbc get -current] next: [$dbc get -next] [$dbc get -next]"
- #set ret [$dbc get -set AFTERPUT]
- #set ret [$dbc get -set $keym]
- #set ret [$dbc get -next]
- #puts $ret
-
- set ret [$dbc get -current]
- error_check_bad dbc_get:curr [llength $ret] 0
- error_check_good dbc_get:curr:match [lindex [lindex $ret 0] 1] AFTERPUT
- set ret [$dbc get -prev]
- # now should be on first item (non-dup) of keym
- error_check_bad dbc_get:prev1 [llength $ret] 0
- error_check_good \
- dbc_get:match [lindex [lindex $ret 0] 1] $dup_set($i)
- set ret [$dbc get -next]
- error_check_bad dbc_get:next [llength $ret] 0
- error_check_good \
- dbc_get:match2 [lindex [lindex $ret 0] 1] AFTERPUT
- set ret [$dbc get -next]
- error_check_bad dbc_get:next [llength $ret] 0
- # this is the dup we added previously
- error_check_good \
- dbc_get:match3 [lindex [lindex $ret 0] 1] BEFOREPUT
-
- # now get rid of the dups we added
- error_check_good dbc_del [$dbc del] 0
- set ret [$dbc get -prev]
- error_check_bad dbc_get:prev2 [llength $ret] 0
- error_check_good dbc_del2 [$dbc del] 0
- # put cursor on first dup item for the rest of test
- set ret [$dbc get -set $keym]
- error_check_bad dbc_get:first [llength $ret] 0
- error_check_good \
- dbc_get:first:check [lindex [lindex $ret 0] 1] $dup_set($i)
-
- puts "\t\tTest046.h.5: Overwrite small by small."
- set ret [$dbc put -current DATA_OVERWRITE]
- error_check_good dbc_put:current:overwrite $ret 0
- set ret [$dbc get -current]
- error_check_good dbc_get:current(put,small/small) \
- [lindex [lindex $ret 0] 1] DATA_OVERWRITE
-
- puts "\t\tTest046.h.6: Overwrite small with big."
- set ret [$dbc put -current DATA_BIG_OVERWRITE[repeat $alphabet 200]]
- error_check_good dbc_put:current:overwrite:big $ret 0
- set ret [$dbc get -current]
- error_check_good dbc_get:current(put,small/big) \
- [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE] 1
-
- puts "\t\tTest046.h.7: Overwrite big with big."
- set ret [$dbc put -current DATA_BIG_OVERWRITE2[repeat $alphabet 200]]
- error_check_good dbc_put:current:overwrite(2):big $ret 0
- set ret [$dbc get -current]
- error_check_good dbc_get:current(put,big/big) \
- [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE2] 1
-
- puts "\t\tTest046.h.8: Overwrite big with small."
- set ret [$dbc put -current DATA_OVERWRITE2]
- error_check_good dbc_put:current:overwrite:small $ret 0
- set ret [$dbc get -current]
- error_check_good dbc_get:current(put,big/small) \
- [is_substr [lindex [lindex $ret 0] 1] DATA_OVERWRITE2] 1
-
- puts "\tTest046.i: Cleaning up from test."
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest046 complete."
-}
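
A small sketch of the DB_BEFORE/DB_AFTER cursor puts that Test046.h covers; on a -dupsort db these same calls fail with EINVAL. Names here are illustrative.

	set db [berkdb_open -create -mode 0644 -btree -dup ex46.db]
	$db put k middle
	set dbc [$db cursor]
	set ret [$dbc get -set k]
	# -before/-after take only a datum; the key comes from the cursor,
	# and the cursor is left on the newly inserted duplicate.
	$dbc put -before first
	$dbc put -after last
	puts [$dbc get -current]	;# {k last}
	$dbc close
	$db close
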
diff --git a/storage/bdb/test/test047.tcl b/storage/bdb/test/test047.tcl
deleted file mode 100644
index 48b6fc759ab..00000000000
--- a/storage/bdb/test/test047.tcl
+++ /dev/null
@@ -1,258 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test047.tcl,v 11.22 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test047
-# TEST DBcursor->c_get get test with SET_RANGE option.
-proc test047 { method args } {
- source ./include.tcl
-
- set tnum 047
- set args [convert_args $method $args]
-
- if { [is_btree $method] != 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
-
- set method "-btree"
-
- puts "\tTest$tnum: Test of SET_RANGE interface to DB->c_get ($method)."
-
- set key "key"
- set data "data"
- set txn ""
- set flags ""
-
- puts "\tTest$tnum.a: Create $method database."
- set eindex [lsearch -exact $args "-env"]
- set txnenv 0
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set testfile1 $testdir/test$tnum.a.db
- set testfile2 $testdir/test$tnum.b.db
- set env NULL
- } else {
- set testfile test$tnum.db
- set testfile1 test$tnum.a.db
- set testfile2 test$tnum.b.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- set oflags "-create -mode 0644 -dup $args $method"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set nkeys 20
- # Fill page w/ small key/data pairs
- #
- puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs."
- for { set i 0 } { $i < $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key$i $data$i}]
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- # open curs to db
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- puts "\tTest$tnum.c: Get data with SET_RANGE, then delete by cursor."
- set i 0
- set ret [$dbc get -set_range $key$i]
- error_check_bad dbc_get:set_range [llength $ret] 0
- set curr $ret
-
- # delete by cursor, make sure it is gone
- error_check_good dbc_del [$dbc del] 0
-
- set ret [$dbc get -set_range $key$i]
- error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
- error_check_bad dbc_get(no-match):set_range $ret $curr
-
- puts "\tTest$tnum.d: \
- Use another cursor to fix item on page, delete by db."
- set dbcurs2 [eval {$db cursor} $txn]
- error_check_good db:cursor2 [is_valid_cursor $dbcurs2 $db] TRUE
-
- set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]]
- error_check_bad dbc_get(2):set [llength $ret] 0
- set curr $ret
- error_check_good db:del [eval {$db del} $txn \
- {[lindex [lindex $ret 0] 0]}] 0
-
- # make sure item is gone
- set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]]
- error_check_bad dbc2_get:set_range [llength $ret] 0
- error_check_bad dbc2_get:set_range $ret $curr
-
- puts "\tTest$tnum.e: Close for second part of test, close db/cursors."
- error_check_good dbc:close [$dbc close] 0
- error_check_good dbc2:close [$dbcurs2 close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good dbclose [$db close] 0
-
- # open db
- set db [eval {berkdb_open} $oflags $testfile1]
- error_check_good dbopen2 [is_valid_db $db] TRUE
-
- set nkeys 10
- puts "\tTest$tnum.f: Fill page with $nkeys pairs, one set of dups."
- for {set i 0} { $i < $nkeys } {incr i} {
- # a pair
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key$i $data$i}]
- error_check_good dbput($i) $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- set j 0
- for {set i 0} { $i < $nkeys } {incr i} {
- # a dup set for same 1 key
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key$i DUP_$data$i}]
- error_check_good dbput($i):dup $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- puts "\tTest$tnum.g: \
- Get dups' key w/ SET_RANGE, pin on page with another cursor."
- set i 0
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- set ret [$dbc get -set_range $key$i]
- error_check_bad dbc_get:set_range [llength $ret] 0
-
- set dbc2 [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
- set ret2 [$dbc2 get -set_range $key$i]
- error_check_bad dbc2_get:set_range [llength $ret2] 0
-
- error_check_good dbc_compare $ret $ret2
- puts "\tTest$tnum.h: \
- Delete duplicates' key, use SET_RANGE to get next dup."
- set ret [$dbc2 del]
- error_check_good dbc2_del $ret 0
- set ret [$dbc get -set_range $key$i]
- error_check_bad dbc_get:set_range [llength $ret] 0
- error_check_bad dbc_get:set_range $ret $ret2
-
- error_check_good dbc_close [$dbc close] 0
- error_check_good dbc2_close [$dbc2 close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- set db [eval {berkdb_open} $oflags $testfile2]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set nkeys 10
- set ndups 1000
-
- puts "\tTest$tnum.i: Fill page with $nkeys pairs and $ndups dups."
- for {set i 0} { $i < $nkeys } { incr i} {
- # a pair
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key$i $data$i}]
- error_check_good dbput $ret 0
-
- # dups for single pair
- if { $i == 0} {
- for {set j 0} { $j < $ndups } { incr j } {
- set ret [eval {$db put} $txn \
- {$key$i DUP_$data$i:$j}]
- error_check_good dbput:dup $ret 0
- }
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- set i 0
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- set dbc2 [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
- puts "\tTest$tnum.j: \
- Get key of first dup with SET_RANGE, fix with 2 curs."
- set ret [$dbc get -set_range $key$i]
- error_check_bad dbc_get:set_range [llength $ret] 0
-
- set ret2 [$dbc2 get -set_range $key$i]
- error_check_bad dbc2_get:set_range [llength $ret2] 0
- set curr $ret2
-
- error_check_good dbc_compare $ret $ret2
-
- puts "\tTest$tnum.k: Delete item by cursor, use SET_RANGE to verify."
- set ret [$dbc2 del]
- error_check_good dbc2_del $ret 0
- set ret [$dbc get -set_range $key$i]
- error_check_bad dbc_get:set_range [llength $ret] 0
- error_check_bad dbc_get:set_range $ret $curr
-
- puts "\tTest$tnum.l: Cleanup."
- error_check_good dbc_close [$dbc close] 0
- error_check_good dbc2_close [$dbc2 close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum complete."
-}
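Test047 turns on the positioning rule of SET_RANGE: where -set demands an exact key match, -set_range settles on the smallest key greater than or equal to the one supplied, which is why the test can still land somewhere after deleting the exact match. A minimal illustration under the same harness assumptions (file and key names arbitrary):

    source ./include.tcl
    set db [berkdb_open -create -mode 0644 -btree range.db]
    $db put key10 data10
    $db put key30 data30
    set dbc [$db cursor]
    set ret [$dbc get -set key20]        ;# exact match required: no key20, empty result
    puts "set:       $ret"
    set ret [$dbc get -set_range key20]  ;# smallest key >= key20, lands on key30
    puts "set_range: $ret"
    $dbc close
    $db close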
diff --git a/storage/bdb/test/test048.tcl b/storage/bdb/test/test048.tcl
deleted file mode 100644
index db73b2b6dcc..00000000000
--- a/storage/bdb/test/test048.tcl
+++ /dev/null
@@ -1,171 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test048.tcl,v 11.22 2004/05/13 18:51:43 mjc Exp $
-#
-# TEST test048
-# TEST Cursor stability across Btree splits.
-proc test048 { method args } {
- global errorCode
- global is_je_test
- source ./include.tcl
-
- set tnum 048
- set args [convert_args $method $args]
-
- if { [is_btree $method] != 1 } {
- puts "Test$tnum skipping for method $method."
- return
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- incr pgindex
- if { [lindex $args $pgindex] > 8192 } {
- puts "Test048: Skipping for large pagesizes"
- return
- }
- }
-
- set method "-btree"
-
- puts "\tTest$tnum: Test of cursor stability across btree splits."
-
- set key "key"
- set data "data"
- set txn ""
- set flags ""
-
- puts "\tTest$tnum.a: Create $method database."
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- set oflags "-create -mode 0644 $args $method"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set nkeys 5
- # Fill page w/ small key/data pairs, keep at leaf
- #
- puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs."
- for { set i 0 } { $i < $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {key000$i $data$i}]
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # get db ordering, set cursors
- puts "\tTest$tnum.c: Set cursors on each of $nkeys pairs."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- for {set i 0; set ret [$db get key000$i]} {\
- $i < $nkeys && [llength $ret] != 0} {\
- incr i; set ret [$db get key000$i]} {
- set key_set($i) [lindex [lindex $ret 0] 0]
- set data_set($i) [lindex [lindex $ret 0] 1]
- set dbc [eval {$db cursor} $txn]
- set dbc_set($i) $dbc
- error_check_good db_cursor:$i \
- [is_valid_cursor $dbc_set($i) $db] TRUE
- set ret [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc_set($i)_get:set [llength $ret] 0
- }
-
- # if mkeys is above 1000, need to adjust below for lexical order
- set mkeys 1000
- puts "\tTest$tnum.d: Add $mkeys pairs to force split."
- for {set i $nkeys} { $i < $mkeys } { incr i } {
- if { $i >= 100 } {
- set ret [eval {$db put} $txn {key0$i $data$i}]
- } elseif { $i >= 10 } {
- set ret [eval {$db put} $txn {key00$i $data$i}]
- } else {
- set ret [eval {$db put} $txn {key000$i $data$i}]
- }
- error_check_good dbput:more $ret 0
- }
-
- puts "\tTest$tnum.e: Make sure split happened."
- # XXX We cannot call stat with active txns or we deadlock.
- if { $txnenv != 1 && !$is_je_test } {
- error_check_bad stat:check-split [is_substr [$db stat] \
- "{{Internal pages} 0}"] 1
- }
-
- puts "\tTest$tnum.f: Check to see that cursors maintained reference."
- for {set i 0} { $i < $nkeys } {incr i} {
- set ret [$dbc_set($i) get -current]
- error_check_bad dbc$i:get:current [llength $ret] 0
- set ret2 [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc$i:get:set [llength $ret2] 0
- error_check_good dbc$i:get(match) $ret $ret2
- }
-
- puts "\tTest$tnum.g: Delete added keys to force reverse split."
- for {set i $nkeys} { $i < $mkeys } { incr i } {
- if { $i >= 100 } {
- error_check_good db_del:$i \
- [eval {$db del} $txn {key0$i}] 0
- } elseif { $i >= 10 } {
- error_check_good db_del:$i \
- [eval {$db del} $txn {key00$i}] 0
- } else {
- error_check_good db_del:$i \
- [eval {$db del} $txn {key000$i}] 0
- }
- }
-
- puts "\tTest$tnum.h: Verify cursor reference."
- for {set i 0} { $i < $nkeys } {incr i} {
- set ret [$dbc_set($i) get -current]
- error_check_bad dbc$i:get:current [llength $ret] 0
- set ret2 [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc$i:get:set [llength $ret2] 0
- error_check_good dbc$i:get(match) $ret $ret2
- }
-
- puts "\tTest$tnum.i: Cleanup."
- # close cursors
- for {set i 0} { $i < $nkeys } {incr i} {
- error_check_good dbc_close:$i [$dbc_set($i) close] 0
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- puts "\tTest$tnum.j: Verify reverse split."
- error_check_good stat:check-reverse_split [is_substr [$db stat] \
- "{{Internal pages} 0}"] 1
-
- error_check_good dbclose [$db close] 0
-
- puts "\tTest$tnum complete."
-}
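Cursor stability, the property test048 checks, means a cursor keeps referencing its key/data pair even after later inserts split the Btree leaf it sits on. Roughly, under the harness assumptions above and with a small page size so a single loop is enough to force a split:

    source ./include.tcl
    set db [berkdb_open -create -mode 0644 -pagesize 512 -btree split.db]
    $db put key0001 data1
    set dbc [$db cursor]
    set ret [$dbc get -set key0001]               ;# pin the cursor on key0001
    for {set i 2} {$i <= 200} {incr i} {          ;# enough pairs to split the leaf
        $db put [format key%04d $i] data$i
    }
    set ret [$dbc get -current]                   ;# cursor still references key0001
    puts [lindex [lindex $ret 0] 0]
    $dbc close
    $db close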
diff --git a/storage/bdb/test/test049.tcl b/storage/bdb/test/test049.tcl
deleted file mode 100644
index f8d173380c8..00000000000
--- a/storage/bdb/test/test049.tcl
+++ /dev/null
@@ -1,184 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test049.tcl,v 11.24 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test049
-# TEST Cursor operations on uninitialized cursors.
-proc test049 { method args } {
- global errorInfo
- global errorCode
- source ./include.tcl
-
- set tnum 049
- set renum [is_rrecno $method]
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "\tTest$tnum: Test of cursor routines with uninitialized cursors."
-
- set key "key"
- set data "data"
- set txn ""
- set flags ""
- set rflags ""
-
- if { [is_record_based $method] == 1 } {
- set key ""
- }
-
- puts "\tTest$tnum.a: Create $method database."
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- set oflags "-create -mode 0644 $rflags $omethod $args"
- if { [is_record_based $method] == 0 && [is_rbtree $method] != 1 } {
- append oflags " -dup"
- }
- set db [eval {berkdb_open_noerr} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set nkeys 10
- puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key$i $data$i}]
- error_check_good dbput:$i $ret 0
- if { $i == 1 } {
- for {set j 0} { $j < [expr $nkeys / 2]} {incr j} {
- set ret [eval {$db put} $txn \
- {$key$i DUPLICATE$j}]
- error_check_good dbput:dup:$j $ret 0
- }
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # DBC GET
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc_u [eval {$db cursor} $txn]
- error_check_good db:cursor [is_valid_cursor $dbc_u $db] TRUE
-
- puts "\tTest$tnum.c: Test dbc->get interfaces..."
- set i 0
- foreach flag { current first last next prev nextdup} {
- puts "\t\t...dbc->get($flag)"
- catch {$dbc_u get -$flag} ret
- error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
- }
-
- foreach flag { set set_range get_both} {
- puts "\t\t...dbc->get($flag)"
- if { [string compare $flag get_both] == 0} {
- catch {$dbc_u get -$flag $key$i data0} ret
- } else {
- catch {$dbc_u get -$flag $key$i} ret
- }
- error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
- }
-
- puts "\t\t...dbc->get(current, partial)"
- catch {$dbc_u get -current -partial {0 0}} ret
- error_check_good dbc:get:partial [is_substr $errorCode EINVAL] 1
-
- puts "\t\t...dbc->get(current, rmw)"
- catch {$dbc_u get -rmw -current } ret
- error_check_good dbc_get:rmw [is_substr $errorCode EINVAL] 1
-
- puts "\tTest$tnum.d: Test dbc->put interface..."
- # partial...depends on another
- foreach flag { after before current keyfirst keylast } {
- puts "\t\t...dbc->put($flag)"
- if { [string match key* $flag] == 1 } {
- if { [is_record_based $method] == 1 } {
- # keyfirst/keylast not allowed in recno
- puts "\t\t...Skipping dbc->put($flag) for $method."
- continue
- } else {
- # keyfirst/last should succeed
- puts "\t\t...dbc->put($flag)...should succeed for $method"
- error_check_good dbcput:$flag \
- [$dbc_u put -$flag $key$i data0] 0
-
- # now uninitialize cursor
- error_check_good dbc_close [$dbc_u close] 0
- set dbc_u [eval {$db cursor} $txn]
- error_check_good \
- db_cursor [is_substr $dbc_u $db] 1
- }
- } elseif { [string compare $flag before ] == 0 ||
- [string compare $flag after ] == 0 } {
- if { [is_record_based $method] == 0 &&
- [is_rbtree $method] == 0} {
- set ret [$dbc_u put -$flag data0]
- error_check_good "$dbc_u:put:-$flag" $ret 0
- } elseif { $renum == 1 } {
- # Renumbering recno will return a record number
- set currecno \
- [lindex [lindex [$dbc_u get -current] 0] 0]
- set ret [$dbc_u put -$flag data0]
- if { [string compare $flag after] == 0 } {
- error_check_good "$dbc_u put $flag" \
- $ret [expr $currecno + 1]
- } else {
- error_check_good "$dbc_u put $flag" \
- $ret $currecno
- }
- } else {
- puts "\t\tSkipping $flag for $method"
- }
- } else {
- set ret [$dbc_u put -$flag data0]
- error_check_good "$dbc_u:put:-$flag" $ret 0
- }
- }
- # and partial
- puts "\t\t...dbc->put(partial)"
- catch {$dbc_u put -partial {0 0} $key$i $data$i} ret
- error_check_good dbc_put:partial [is_substr $errorCode EINVAL] 1
-
- # XXX dbc->dup, db->join (dbc->get join_item)
- # dbc del
- puts "\tTest$tnum.e: Test dbc->del interface."
- catch {$dbc_u del} ret
- error_check_good dbc_del [is_substr $errorCode EINVAL] 1
-
- error_check_good dbc_close [$dbc_u close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum complete."
-}
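Test049's theme is that any cursor operation needing an established position (get -current, del, put -current, and so on) fails with EINVAL on a cursor that was opened but never positioned. The failure is easy to see in isolation; this sketch assumes berkdb_open_noerr and Tcl's global errorCode, just as the test itself uses them:

    source ./include.tcl
    set db [berkdb_open_noerr -create -mode 0644 -btree uninit.db]
    set dbc [$db cursor]
    # The cursor has no position yet, so -current must fail with EINVAL.
    set failed [catch {$dbc get -current} ret]
    puts "failed=$failed errorCode=$errorCode"
    $dbc close
    $db close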
diff --git a/storage/bdb/test/test050.tcl b/storage/bdb/test/test050.tcl
deleted file mode 100644
index 72be5af67f9..00000000000
--- a/storage/bdb/test/test050.tcl
+++ /dev/null
@@ -1,221 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test050.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test050
-# TEST Overwrite test of small/big key/data with cursor checks for Recno.
-proc test050 { method args } {
- global alphabet
- global errorInfo
- global errorCode
- source ./include.tcl
-
- set tstn 050
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_rrecno $method] != 1 } {
- puts "Test$tstn skipping for method $method."
- return
- }
-
- puts "\tTest$tstn:\
- Overwrite test with cursor and small/big key/data ($method)."
-
- set data "data"
- set txn ""
- set flags ""
-
- puts "\tTest$tstn: Create $method database."
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test0$tstn.db
- set env NULL
- } else {
- set testfile test0$tstn.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- set oflags "-create -mode 0644 $args $omethod"
- set db [eval {berkdb_open_noerr} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # keep nkeys even
- set nkeys 20
-
- # Fill page w/ small key/data pairs
- #
- puts "\tTest$tstn: Fill page with $nkeys small key/data pairs."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$i [chop_data $method $data$i]}]
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- # open curs to db
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- # get db order of keys
- for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
- set ret [$dbc get -next]} {
- set key_set($i) [lindex [lindex $ret 0] 0]
- set data_set($i) [lindex [lindex $ret 0] 1]
- incr i
- }
-
- # verify ordering: should be unnecessary, but hey, why take chances?
- # key_set is zero indexed but keys start at 1
- for {set i 0} { $i < $nkeys } {incr i} {
- error_check_good \
- verify_order:$i $key_set($i) [pad_data $method [expr $i+1]]
- }
-
- puts "\tTest$tstn.a: Inserts before/after by cursor."
- puts "\t\tTest$tstn.a.1:\
- Insert with uninitialized cursor (should fail)."
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
- catch {$dbc put -before DATA1} ret
- error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1
-
- catch {$dbc put -after DATA2} ret
- error_check_good dbc_put:after:uninit [is_substr $errorCode EINVAL] 1
-
- puts "\t\tTest$tstn.a.2: Insert with deleted cursor (should succeed)."
- set ret [$dbc get -first]
- error_check_bad dbc_get:first [llength $ret] 0
- error_check_good dbc_del [$dbc del] 0
- set ret [$dbc put -current DATAOVER1]
- error_check_good dbc_put:current:deleted $ret 0
-
- puts "\t\tTest$tstn.a.3: Insert by cursor before cursor (DB_BEFORE)."
- set currecno [lindex [lindex [$dbc get -current] 0] 0]
- set ret [$dbc put -before DATAPUTBEFORE]
- error_check_good dbc_put:before $ret $currecno
- set old1 [$dbc get -next]
- error_check_bad dbc_get:next [llength $old1] 0
- error_check_good \
- dbc_get:next(compare) [lindex [lindex $old1 0] 1] DATAOVER1
-
- puts "\t\tTest$tstn.a.4: Insert by cursor after cursor (DB_AFTER)."
- set ret [$dbc get -first]
- error_check_bad dbc_get:first [llength $ret] 0
- error_check_good dbc_get:first [lindex [lindex $ret 0] 1] DATAPUTBEFORE
- set currecno [lindex [lindex [$dbc get -current] 0] 0]
- set ret [$dbc put -after DATAPUTAFTER]
- error_check_good dbc_put:after $ret [expr $currecno + 1]
- set ret [$dbc get -prev]
- error_check_bad dbc_get:prev [llength $ret] 0
- error_check_good \
- dbc_get:prev [lindex [lindex $ret 0] 1] DATAPUTBEFORE
-
- puts "\t\tTest$tstn.a.5: Verify that all keys have been renumbered."
- # should be $nkeys + 2 keys, starting at 1
- for {set i 1; set ret [$dbc get -first]} { \
- $i <= $nkeys && [llength $ret] != 0 } {\
- incr i; set ret [$dbc get -next]} {
- error_check_good check_renumber $i [lindex [lindex $ret 0] 0]
- }
-
- # tested above
-
- puts "\tTest$tstn.b: Overwrite tests (cursor and key)."
- # For the next part of the test, we need a db with no dups to test
- # overwrites
- #
- # we should have ($nkeys + 2) keys, ordered:
- # DATAPUTBEFORE, DATAPUTAFTER, DATAOVER1, data1, ..., data$nkeys
- #
- # Prepare cursor on item
- #
- set ret [$dbc get -first]
- error_check_bad dbc_get:first [llength $ret] 0
-
- # Prepare unique big/small values for an initial
- # and an overwrite set of data
- set databig DATA_BIG_[repeat $alphabet 250]
- set datasmall DATA_SMALL
-
- # Now, we want to overwrite data:
- # by key and by cursor
- # 1. small by small
- # 2. small by big
- # 3. big by small
- # 4. big by big
- #
- set i 0
- # Do all overwrites for key and cursor
- foreach type { by_key by_cursor } {
- incr i
- puts "\tTest$tstn.b.$i: Overwrites $type."
- foreach pair { {small small} \
- {small big} {big small} {big big} } {
- # put in the initial value, using the prepared small/big payloads
- set d [set data[lindex $pair 0]] ;# $datasmall or $databig
- set ret [$dbc put -current $d]
- error_check_good dbc_put:curr:init:($pair) $ret 0
-
- # Now, try to overwrite: dups not supported in this db
- if { [string compare $type by_key] == 0 } {
- puts "\t\tTest$tstn.b.$i:\
- Overwrite:($pair):$type"
- set ret [eval {$db put} $txn \
- 1 {OVER$pair[set data[lindex $pair 1]]}]
- error_check_good dbput:over:($pair) $ret 0
- } else {
- # This is a cursor overwrite
- puts "\t\tTest$tstn.b.$i:\
- Overwrite:($pair) by cursor."
- set ret [$dbc put \
- -current OVER$pair[set data[lindex $pair 1]]]
- error_check_good dbcput:over:($pair) $ret 0
- }
- } ;# foreach pair
- } ;# foreach type key/cursor
-
- puts "\tTest$tstn.c: Cleanup and close cursor."
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
-}
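The renumbering rule behind test050: in an rrecno database, put -before inserts at the cursor's record number and put -after just past it, shifting every later record up by one, and the call returns the record number of the new item. A minimal sketch under the harness assumptions (record data made up):

    source ./include.tcl
    set db [berkdb_open -create -mode 0644 -renumber -recno rren.db]
    $db put 1 first
    $db put 2 second
    set dbc [$db cursor]
    set ret [$dbc get -set 2]                ;# cursor on record 2 ("second")
    set newrec [$dbc put -before middle]     ;# returns the new record number, 2
    set ret [$db get 3]                      ;# "second" was renumbered to 3
    puts "new record $newrec; record 3 holds [lindex [lindex $ret 0] 1]"
    $dbc close
    $db close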
diff --git a/storage/bdb/test/test051.tcl b/storage/bdb/test/test051.tcl
deleted file mode 100644
index 5e09835e3c2..00000000000
--- a/storage/bdb/test/test051.tcl
+++ /dev/null
@@ -1,222 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test051.tcl,v 11.25 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test051
-# TEST Fixed-length record Recno test.
-# TEST 0. Test various flags (legal and illegal) to open
-# TEST 1. Test partial puts where dlen != size (should fail)
-# TEST 2. Partial puts on an existing record -- replaces at beg, mid, and
-# TEST end of record, as well as full replace
-proc test051 { method { args "" } } {
- global fixed_len
- global errorInfo
- global errorCode
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test051: Test of fixed-length records."
- if { [is_fixed_length $method] != 1 } {
- puts "Test051: skipping for method $method"
- return
- }
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test051.db
- set testfile1 $testdir/test051a.db
- set env NULL
- } else {
- set testfile test051.db
- set testfile1 test051a.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
- set oflags "-create -mode 0644 $args"
-
- # Test various flags (legal and illegal) to open
- puts "\tTest051.a: Test correct flag behavior on open."
- set errorCode NONE
- foreach f { "-dup" "-dup -dupsort" "-recnum" } {
- puts "\t\tTest051.a: Test flag $f"
- set stat [catch {eval {berkdb_open_noerr} $oflags $f $omethod \
- $testfile} ret]
- error_check_good dbopen:flagtest:catch $stat 1
- error_check_good \
- dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
- set errorCode NONE
- }
- set f "-renumber"
- puts "\t\tTest051.a: Test $f"
- if { [is_frecno $method] == 1 } {
- set db [eval {berkdb_open} $oflags $f $omethod $testfile]
- error_check_good dbopen:flagtest:$f [is_valid_db $db] TRUE
- $db close
- } else {
- error_check_good \
- dbopen:flagtest:catch [catch {eval {berkdb_open_noerr}\
- $oflags $f $omethod $testfile} ret] 1
- error_check_good \
- dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
- }
-
- # Test partial puts where dlen != size (should fail)
- # it is an error to specify a partial put w/ different
- # dlen and size in fixed length recno/queue
- set key 1
- set data ""
- set txn ""
- set test_char "a"
-
- set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- puts "\tTest051.b: Partial puts with dlen != size."
- foreach dlen { 1 16 20 32 } {
- foreach doff { 0 10 20 32 } {
- # dlen < size
- puts "\t\tTest051.b: dlen: $dlen, doff: $doff, \
- size: [expr $dlen+1]"
- set data [repeat $test_char [expr $dlen + 1]]
- error_check_good \
- catch:put 1 [catch {eval {$db put -partial \
- [list $doff $dlen]} $txn {$key $data}} ret]
-
- # We don't get back the server error string just
- # the result.
- if { $eindex == -1 } {
- error_check_good "dbput:partial: dlen < size" \
- [is_substr \
- $errorInfo "Record length error"] 1
- } else {
- error_check_good "dbput:partial: dlen < size" \
- [is_substr $errorCode "EINVAL"] 1
- }
-
- # dlen > size
- puts "\t\tTest051.b: dlen: $dlen, doff: $doff, \
- size: [expr $dlen-1]"
- set data [repeat $test_char [expr $dlen - 1]]
- error_check_good \
- catch:put 1 [catch {eval {$db put -partial \
- [list $doff $dlen]} $txn {$key $data}} ret]
- if { $eindex == -1 } {
- error_check_good "dbput:partial: dlen > size" \
- [is_substr \
- $errorInfo "Record length error"] 1
- } else {
- error_check_good "dbput:partial: dlen > size" \
- [is_substr $errorCode "EINVAL"] 1
- }
- }
- }
-
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- $db close
-
- # Partial puts on an existing record -- replaces at beg, mid, and
- # end of record, as well as full replace
- puts "\tTest051.f: Partial puts within existent record."
- set db [eval {berkdb_open} $oflags $omethod $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\t\tTest051.f: First try a put and then a full replace."
- set data [repeat "a" $fixed_len]
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {1 $data}]
- error_check_good dbput $ret 0
- set ret [eval {$db get} $txn {-recno 1}]
- error_check_good dbget $data [lindex [lindex $ret 0] 1]
-
- set data [repeat "b" $fixed_len]
- set ret [eval {$db put -partial [list 0 $fixed_len]} $txn {1 $data}]
- error_check_good dbput $ret 0
- set ret [eval {$db get} $txn {-recno 1}]
- error_check_good dbget $data [lindex [lindex $ret 0] 1]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set data "InitialData"
- set pdata "PUT"
- set dlen [string length $pdata]
- set ilen [string length $data]
- set mid [expr $ilen/2]
-
- # put initial data
- set key 0
-
- set offlist [list 0 $mid [expr $ilen -1] [expr $fixed_len - $dlen]]
- puts "\t\tTest051.g: Now replace at different offsets ($offlist)."
- foreach doff $offlist {
- incr key
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key $data}]
- error_check_good dbput:init $ret 0
-
- puts "\t\tTest051.g: Replace at offset $doff."
- set ret [eval {$db put -partial [list $doff $dlen]} $txn \
- {$key $pdata}]
- error_check_good dbput:partial $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- if { $doff == 0} {
- set beg ""
- set end [string range $data $dlen $ilen]
- } else {
- set beg [string range $data 0 [expr $doff - 1]]
- set end [string range $data [expr $doff + $dlen] $ilen]
- }
- if { $doff > $ilen } {
- # have to put padding between record and inserted
- # string
- set newdata [format %s%s $beg $end]
- set diff [expr $doff - $ilen]
- set nlen [string length $newdata]
- set newdata [binary \
- format a[set nlen]x[set diff]a$dlen $newdata $pdata]
- } else {
- set newdata [make_fixed_length \
- frecno [format %s%s%s $beg $pdata $end]]
- }
- set ret [$db get -recno $key]
- error_check_good compare($newdata,$ret) \
- [binary_compare [lindex [lindex $ret 0] 1] $newdata] 0
- }
-
- $db close
-}
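The dlen != size restriction checked in Test051.b follows directly from fixed-length records: a partial put may replace only exactly as many bytes as it supplies. The failing call looks like the sketch below; the -frecno and -len open flags are assumed to behave as the harness uses them:

    source ./include.tcl
    set db [berkdb_open_noerr -create -mode 0644 -len 32 -frecno fixed.db]
    set data [string repeat a 5]             ;# 5 bytes of replacement data
    # dlen is 4 but the data is 5 bytes long, so the put must fail.
    set failed [catch {$db put -partial {0 4} 1 $data} ret]
    puts "failed=$failed ($ret)"
    $db close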
diff --git a/storage/bdb/test/test052.tcl b/storage/bdb/test/test052.tcl
deleted file mode 100644
index c7d891d33c7..00000000000
--- a/storage/bdb/test/test052.tcl
+++ /dev/null
@@ -1,269 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test052.tcl,v 11.20 2004/09/20 17:06:16 sue Exp $
-#
-# TEST test052
-# TEST Renumbering record Recno test.
-proc test052 { method args } {
- global alphabet
- global errorInfo
- global errorCode
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test052: Test of renumbering recno."
- if { [is_rrecno $method] != 1} {
- puts "Test052: skipping for method $method."
- return
- }
-
- set data "data"
- set txn ""
- set flags ""
-
- puts "\tTest052: Create $method database."
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test052.db
- set env NULL
- } else {
- set testfile test052.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- set oflags "-create -mode 0644 $args $omethod"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # keep nkeys even
- set nkeys 20
-
- # Fill page w/ small key/data pairs
- puts "\tTest052: Fill page with $nkeys small key/data pairs."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$i $data$i}]
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # open curs to db
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- # get db order of keys
- for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
- set ret [$dbc get -next]} {
- set keys($i) [lindex [lindex $ret 0] 0]
- set darray($i) [lindex [lindex $ret 0] 1]
- incr i
- }
-
- puts "\tTest052: Deletes by key."
- puts "\tTest052.a: Get data with SET, then delete before cursor."
- # get key in middle of page, call this the nth set curr to it
- set i [expr $nkeys/2]
- set k $keys($i)
- set ret [$dbc get -set $k]
- error_check_bad dbc_get:set [llength $ret] 0
- error_check_good dbc_get:set [lindex [lindex $ret 0] 1] $darray($i)
-
- # delete by key before current
- set i [incr i -1]
- error_check_good db_del:before [eval {$db del} $txn {$keys($i)}] 0
- # with renumber, current's data should be constant, but its key shifts down by one (key == --key)
- set i [incr i +1]
- error_check_good dbc:data \
- [lindex [lindex [$dbc get -current] 0] 1] $darray($i)
- error_check_good dbc:keys \
- [lindex [lindex [$dbc get -current] 0] 0] $keys([expr $nkeys/2 - 1])
-
- puts "\tTest052.b: Delete cursor item by key."
- set i [expr $nkeys/2 ]
-
- set ret [$dbc get -set $keys($i)]
- error_check_bad dbc:get [llength $ret] 0
- error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \
- $darray([expr $i + 1])
- error_check_good db_del:curr [eval {$db del} $txn {$keys($i)}] 0
- set ret [$dbc get -current]
-
- # After a delete, cursor should return DB_NOTFOUND.
- error_check_good dbc:get:key [llength [lindex [lindex $ret 0] 0]] 0
- error_check_good dbc:get:data [llength [lindex [lindex $ret 0] 1]] 0
-
- # And the item after the cursor should now be
- # key: $nkeys/2, data: $nkeys/2 + 2
- set ret [$dbc get -next]
- error_check_bad dbc:getnext [llength $ret] 0
- error_check_good dbc:getnext:data \
- [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
- error_check_good dbc:getnext:keys \
- [lindex [lindex $ret 0] 0] $keys($i)
-
- puts "\tTest052.c: Delete item after cursor."
- # should be { keys($nkeys/2), darray($nkeys/2 + 2) }
- set i [expr $nkeys/2]
- # deleting data for key after current (key $nkeys/2 + 1)
- error_check_good db_del [eval {$db del} $txn {$keys([expr $i + 1])}] 0
-
- # current should be constant
- set ret [$dbc get -current]
- error_check_bad dbc:get:current [llength $ret] 0
- error_check_good dbc:get:keys [lindex [lindex $ret 0] 0] \
- $keys($i)
- error_check_good dbc:get:data [lindex [lindex $ret 0] 1] \
- $darray([expr $i + 2])
-
- puts "\tTest052: Deletes by cursor."
- puts "\tTest052.d: Delete, do DB_NEXT."
- set i 1
- set ret [$dbc get -first]
- error_check_bad dbc_get:first [llength $ret] 0
- error_check_good dbc_get:first [lindex [lindex $ret 0] 1] $darray($i)
- error_check_good dbc_del [$dbc del] 0
- set ret [$dbc get -current]
- error_check_good dbc_get:current [llength $ret] 0
-
- set ret [$dbc get -next]
- error_check_bad dbc_get:next [llength $ret] 0
- error_check_good dbc:get:curs \
- [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
- error_check_good dbc:get:keys \
- [lindex [lindex $ret 0] 0] $keys($i)
-
- # Move one more forward, so we're not on the first item.
- error_check_bad dbc:getnext [llength [$dbc get -next]] 0
-
- puts "\tTest052.e: Delete, do DB_PREV."
- error_check_good dbc:del [$dbc del] 0
- set ret [$dbc get -current]
- error_check_good dbc:get:curr [llength $ret] 0
-
- # next should now reference the record that was previously after
- # old current
- set ret [$dbc get -next]
- error_check_bad get:next [llength $ret] 0
- error_check_good dbc:get:next:data \
- [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
- error_check_good dbc:get:next:keys \
- [lindex [lindex $ret 0] 0] $keys([expr $i + 1])
-
-
- set ret [$dbc get -prev]
- error_check_bad dbc:get:curr [llength $ret] 0
- error_check_good dbc:get:curr:compare \
- [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
- error_check_good dbc:get:curr:keys \
- [lindex [lindex $ret 0] 0] $keys($i)
-
- # The rest of the test was written with the old rrecno semantics,
- # which required a separate c_del(CURRENT) test; to leave
- # the database in the expected state, we now delete the first item.
- set ret [$dbc get -first]
- error_check_bad getfirst [llength $ret] 0
- error_check_good delfirst [$dbc del] 0
-
- puts "\tTest052: Inserts."
- puts "\tTest052.g: Insert before (DB_BEFORE)."
- set i 1
- set ret [$dbc get -first]
- error_check_bad dbc:get:first [llength $ret] 0
- error_check_good dbc_get:first \
- [lindex [lindex $ret 0] 0] $keys($i)
- error_check_good dbc_get:first:data \
- [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
-
- set ret [$dbc put -before $darray($i)]
- # should return new key, which should be $keys($i)
- error_check_good dbc_put:before $ret $keys($i)
- # cursor should adjust to point to new item
- set ret [$dbc get -current]
- error_check_bad dbc_get:curr [llength $ret] 0
- error_check_good dbc_put:before:keys \
- [lindex [lindex $ret 0] 0] $keys($i)
- error_check_good dbc_put:before:data \
- [lindex [lindex $ret 0] 1] $darray($i)
-
- set ret [$dbc get -next]
- error_check_bad dbc_get:next [llength $ret] 0
- error_check_good dbc_get:next:compare \
- $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 3])]]
- set ret [$dbc get -prev]
- error_check_bad dbc_get:prev [llength $ret] 0
-
- puts "\tTest052.h: Insert by cursor after (DB_AFTER)."
- set i [incr i]
- set ret [$dbc put -after $darray($i)]
- # should return new key, which should be $keys($i)
- error_check_good dbcput:after $ret $keys($i)
- # cursor should reference new item
- set ret [$dbc get -current]
- error_check_good dbc:get:current:keys \
- [lindex [lindex $ret 0] 0] $keys($i)
- error_check_good dbc:get:current:data \
- [lindex [lindex $ret 0] 1] $darray($i)
-
- # items after curs should be adjusted
- set ret [$dbc get -next]
- error_check_bad dbc:get:next [llength $ret] 0
- error_check_good dbc:get:next:compare \
- $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 2])]]
-
- puts "\tTest052.i: Insert (overwrite) current item (DB_CURRENT)."
- set i 1
- set ret [$dbc get -first]
- error_check_bad dbc_get:first [llength $ret] 0
- # choose a datum that is not currently in db
- set ret [$dbc put -current $darray([expr $i + 2])]
- error_check_good dbc_put:curr $ret 0
- # curs should be on new item
- set ret [$dbc get -current]
- error_check_bad dbc_get:current [llength $ret] 0
- error_check_good dbc_get:curr:keys \
- [lindex [lindex $ret 0] 0] $keys($i)
- error_check_good dbc_get:curr:data \
- [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
-
- set ret [$dbc get -next]
- error_check_bad dbc_get:next [llength $ret] 0
- set i [incr i]
- error_check_good dbc_get:next \
- $ret [list [list $keys($i) $darray($i)]]
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest052 complete."
-}
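Deletion in a renumbering recno database, the other half of what test052 covers, shifts every later record down by one, so the data that was at record N+1 becomes readable at record N immediately. A small sketch under the harness assumptions:

    source ./include.tcl
    set db [berkdb_open -create -mode 0644 -renumber -recno ren.db]
    foreach {k d} {1 one 2 two 3 three} { $db put $k $d }
    $db del 1                                ;# drop record 1
    set ret [$db get 1]                      ;# "two" has been renumbered to 1
    puts [lindex [lindex $ret 0] 1]
    $db close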
diff --git a/storage/bdb/test/test053.tcl b/storage/bdb/test/test053.tcl
deleted file mode 100644
index 5c5e060ec48..00000000000
--- a/storage/bdb/test/test053.tcl
+++ /dev/null
@@ -1,231 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test053.tcl,v 11.21 2004/05/13 18:51:43 mjc Exp $
-#
-# TEST test053
-# TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
-# TEST methods.
-proc test053 { method args } {
- global alphabet
- global errorCode
- global is_je_test
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "\tTest053: Test of cursor stability across btree splits."
- if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } {
- puts "Test053: skipping for method $method."
- return
- }
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test053: skipping for specific pagesizes"
- return
- }
-
- set txn ""
- set flags ""
-
- puts "\tTest053.a: Create $omethod $args database."
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test053.db
- set env NULL
- } else {
- set testfile test053.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- set oflags \
- "-create -revsplitoff -pagesize 1024 $args $omethod"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set nkeys 8
- set npages 15
-
- # We want to create a db with npages leaf pages, and have each page
- # be near full with keys that we can predict. We set pagesize above
- # to 1024 bytes, it should breakdown as follows (per page):
- #
- # ~20 bytes overhead
- # key: ~4 bytes overhead, XXX0N where X is a letter, N is 0-9
- # data: ~4 bytes overhead, + 100 bytes
- #
- # then, with 8 keys/page we should be just under 1024 bytes
- puts "\tTest053.b: Create $npages pages with $nkeys pairs on each."
- set keystring [string range $alphabet 0 [expr $npages -1]]
- set data [repeat DATA 22]
- for { set i 0 } { $i < $npages } {incr i } {
- set key ""
- set keyroot \
- [repeat [string toupper [string range $keystring $i $i]] 3]
- set key_set($i) $keyroot
- for {set j 0} { $j < $nkeys} {incr j} {
- if { $j < 10 } {
- set key [set keyroot]0$j
- } else {
- set key $keyroot$j
- }
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key $data}]
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- }
-
- if { !$is_je_test } {
- puts "\tTest053.c: Check page count."
- error_check_good page_count:check \
- [is_substr [$db stat] "{Leaf pages} $npages"] 1
- }
-
- puts "\tTest053.d: Delete all but one key per page."
- for {set i 0} { $i < $npages } {incr i } {
- for {set j 1} { $j < $nkeys } {incr j } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db del} $txn {$key_set($i)0$j}]
- error_check_good dbdel $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- }
-
- if { !$is_je_test } {
- puts "\tTest053.e: Check to make sure all pages are still there."
- error_check_good page_count:check \
- [is_substr [$db stat] "{Leaf pages} $npages"] 1
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db:cursor [is_valid_cursor $dbc $db] TRUE
-
- # walk cursor through tree forward, backward.
- # delete one key, repeat
- for {set i 0} { $i < $npages} {incr i} {
- puts -nonewline \
- "\tTest053.f.$i: Walk curs through tree: forward..."
- for { set j $i; set curr [$dbc get -first]} { $j < $npages} { \
- incr j; set curr [$dbc get -next]} {
- error_check_bad dbc:get:next [llength $curr] 0
- error_check_good dbc:get:keys \
- [lindex [lindex $curr 0] 0] $key_set($j)00
- }
- puts -nonewline "backward..."
- for { set j [expr $npages - 1]; set curr [$dbc get -last]} { \
- $j >= $i } { \
- set j [incr j -1]; set curr [$dbc get -prev]} {
- error_check_bad dbc:get:prev [llength $curr] 0
- error_check_good dbc:get:keys \
- [lindex [lindex $curr 0] 0] $key_set($j)00
- }
- puts "complete."
-
- if { [is_rbtree $method] == 1} {
- puts "\t\tTest053.f.$i:\
- Walk through tree with record numbers."
- for {set j 1} {$j <= [expr $npages - $i]} {incr j} {
- set curr [eval {$db get} $txn {-recno $j}]
- error_check_bad \
- db_get:recno:$j [llength $curr] 0
- error_check_good db_get:recno:keys:$j \
- [lindex [lindex $curr 0] 0] \
- $key_set([expr $j + $i - 1])00
- }
- }
- puts "\tTest053.g.$i:\
- Delete single key ([expr $npages - $i] keys left)."
- set ret [eval {$db del} $txn {$key_set($i)00}]
- error_check_good dbdel $ret 0
- error_check_good del:check \
- [llength [eval {$db get} $txn {$key_set($i)00}]] 0
- }
-
- # end for loop, verify db_notfound
- set ret [$dbc get -first]
- error_check_good dbc:get:verify [llength $ret] 0
-
- # loop: until single key restored on each page
- for {set i 0} { $i < $npages} {incr i} {
- puts "\tTest053.i.$i:\
- Restore single key ([expr $i + 1] keys in tree)."
- set ret [eval {$db put} $txn {$key_set($i)00 $data}]
- error_check_good dbput $ret 0
-
- puts -nonewline \
- "\tTest053.j: Walk cursor through tree: forward..."
- for { set j 0; set curr [$dbc get -first]} { $j <= $i} {\
- incr j; set curr [$dbc get -next]} {
- error_check_bad dbc:get:next [llength $curr] 0
- error_check_good dbc:get:keys \
- [lindex [lindex $curr 0] 0] $key_set($j)00
- }
- error_check_good dbc:get:next [llength $curr] 0
-
- puts -nonewline "backward..."
- for { set j $i; set curr [$dbc get -last]} { \
- $j >= 0 } { \
- set j [incr j -1]; set curr [$dbc get -prev]} {
- error_check_bad dbc:get:prev [llength $curr] 0
- error_check_good dbc:get:keys \
- [lindex [lindex $curr 0] 0] $key_set($j)00
- }
- puts "complete."
- error_check_good dbc:get:prev [llength $curr] 0
-
- if { [is_rbtree $method] == 1} {
- puts "\t\tTest053.k.$i:\
- Walk through tree with record numbers."
- for {set j 1} {$j <= [expr $i + 1]} {incr j} {
- set curr [eval {$db get} $txn {-recno $j}]
- error_check_bad \
- db_get:recno:$j [llength $curr] 0
- error_check_good db_get:recno:keys:$j \
- [lindex [lindex $curr 0] 0] \
- $key_set([expr $j - 1])00
- }
- }
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "Test053 complete."
-}
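DB_REVSPLITOFF, which test053 exercises through the -revsplitoff open flag, suppresses reverse splits: pages emptied by deletes are not coalesced, so the {Leaf pages} figure from db stat stays constant no matter how many pairs are removed. A rough sketch of that invariant (fill, delete almost everything, compare the two stat reports; names and sizes arbitrary):

    source ./include.tcl
    set db [berkdb_open -create -mode 0644 -revsplitoff -pagesize 1024 \
        -btree rs.db]
    for {set i 0} {$i < 200} {incr i} {
        $db put [format key%04d $i] [string repeat D 100]
    }
    puts [$db stat]                          ;# note the {Leaf pages} count
    for {set i 1} {$i < 200} {incr i} {
        $db del [format key%04d $i]          ;# leave only key0000 behind
    }
    puts [$db stat]                          ;# leaf-page count is unchanged
    $db close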
diff --git a/storage/bdb/test/test054.tcl b/storage/bdb/test/test054.tcl
deleted file mode 100644
index 44d0335f469..00000000000
--- a/storage/bdb/test/test054.tcl
+++ /dev/null
@@ -1,460 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test054.tcl,v 11.26 2004/09/20 17:06:16 sue Exp $
-#
-# TEST test054
-# TEST Cursor maintenance during key/data deletion.
-# TEST
-# TEST This test checks for cursor maintenance in the presence of deletes.
-# TEST There are N different scenarios to test:
-# TEST 1. No duplicates. Cursor A deletes a key, do a GET for the key.
-# TEST 2. No duplicates. Cursor is positioned right before key K, delete K,
-# TEST do a next on the cursor.
-# TEST 3. No duplicates. Cursor is positioned on key K, do a regular delete
-# TEST of K, do a current get on K.
-# TEST 4. Repeat 3 but do a next instead of current.
-# TEST 5. Duplicates. Cursor A is on the first item of a duplicate set, A
-# TEST does a delete. Then we do a non-cursor get.
-# TEST 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
-# TEST Then do a delete of the entire key. Test cursor current.
-# TEST 7. Continue last test and try cursor next.
-# TEST 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
-# TEST Cursor B is in the same duplicate set and deletes a different item.
-# TEST Verify that the cursor is in the right place.
-# TEST 9. Cursors A and B are in the same place in the same duplicate set. A
-# TEST deletes its item. Do current on B.
-# TEST 10. Continue 8 and do a next on B.
-proc test054 { method args } {
- global errorInfo
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- append args " -create -mode 0644"
- puts "Test054 ($method $args):\
- interspersed cursor and normal operations"
- if { [is_record_based $method] == 1 } {
- puts "Test054 skipping for method $method"
- return
- }
-
- # Find the environment in the argument list, we'll need it
- # later.
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- }
-
- # Create the database and open the dictionary
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test054-nodup.db
- set env NULL
- } else {
- set testfile test054-nodup.db
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set flags ""
- set txn ""
-
- puts "\tTest054.a: No Duplicate Tests"
- set db [eval {berkdb_open} $args {$omethod $testfile}]
- error_check_good db_open:nodup [is_valid_db $db] TRUE
-
- # Put three keys in the database
- for { set key 1 } { $key <= 3 } {incr key} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} $txn $flags {$key datum$key}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
-
- # Retrieve keys sequentially so we can figure out their order
- set i 1
- for {set d [$curs get -first] } \
- {[llength $d] != 0 } \
- {set d [$curs get -next] } {
- set key_set($i) [lindex [lindex $d 0] 0]
- incr i
- }
-
- # Test case #1.
- puts "\tTest054.a1: Delete w/cursor, regular get"
-
- # Now set the cursor on the middle on.
- set r [$curs get -set $key_set(2)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(2)
- error_check_good curs_get:DB_SET:data $d datum$key_set(2)
-
- # Now do the delete
- set r [$curs del]
- error_check_good curs_del $r 0
-
- # Now do the get
- set r [eval {$db get} $txn {$key_set(2)}]
- error_check_good get_after_del [llength $r] 0
-
- # Free up the cursor.
- error_check_good cursor_close [eval {$curs close}] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Test case #2.
- puts "\tTest054.a2: Cursor before K, delete K, cursor next"
-
- # Replace key 2
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # Open and position cursor on first item.
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
-
- # Retrieve keys sequentially so we can figure out their order
- set i 1
- for {set d [eval {$curs get} -first] } \
- {[llength $d] != 0 } \
- {set d [$curs get -nextdup] } {
- set key_set($i) [lindex [lindex $d 0] 0]
- incr i
- }
-
- set r [eval {$curs get} -set {$key_set(1)} ]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(1)
- error_check_good curs_get:DB_SET:data $d datum$key_set(1)
-
- # Now delete (next item) $key_set(2)
- error_check_good \
- db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
-
- # Now do next on cursor
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(3)
- error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
-
- # Test case #3.
- puts "\tTest054.a3: Cursor on K, delete K, cursor current"
-
- # delete item 3
- error_check_good \
- db_del:$key_set(3) [eval {$db del} $txn {$key_set(3)}] 0
- # NEEDS TO COME BACK IN, BUG CHECK
- set ret [$curs get -current]
- error_check_good current_after_del $ret ""
- error_check_good cursor_close [$curs close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- puts "\tTest054.a4: Cursor on K, delete K, cursor next"
-
- # Restore keys 2 and 3
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
- error_check_good put $r 0
- set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- # Create the new cursor and put it on 1
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
- set r [$curs get -set $key_set(1)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(1)
- error_check_good curs_get:DB_SET:data $d datum$key_set(1)
-
- # Delete 2
- error_check_good \
- db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
-
- # Now do next on cursor
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(3)
- error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
-
- # Close cursor
- error_check_good curs_close [$curs close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now get ready for duplicate tests
-
- if { [is_rbtree $method] == 1 } {
- puts "Test054: skipping remainder of test for method $method."
- return
- }
-
- puts "\tTest054.b: Duplicate Tests"
- append args " -dup"
-
- # Open a new database for the dup tests so -truncate is not needed.
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test054-dup.db
- set env NULL
- } else {
- set testfile test054-dup.db
- set env [lindex $args $eindex]
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set flags ""
- set txn ""
-
- set db [eval {berkdb_open} $args {$omethod $testfile}]
- error_check_good db_open:dup [is_valid_db $db] TRUE
-
- # Put three keys in the database
- for { set key 1 } { $key <= 3 } {incr key} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} $txn $flags {$key datum$key}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Retrieve keys sequentially so we can figure out their order
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
-
- set i 1
- for {set d [$curs get -first] } \
- {[llength $d] != 0 } \
- {set d [$curs get -nextdup] } {
- set key_set($i) [lindex [lindex $d 0] 0]
- incr i
- }
-
- # Now put in a bunch of duplicates for key 2
- for { set d 1 } { $d <= 5 } {incr d} {
- set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
- error_check_good dup:put $r 0
- }
-
- # Test case #5.
- puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key."
-
- # Now set the cursor on the first of the duplicate set.
- set r [eval {$curs get} -set {$key_set(2)}]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(2)
- error_check_good curs_get:DB_SET:data $d datum$key_set(2)
-
- # Now do the delete
- set r [$curs del]
- error_check_good curs_del $r 0
-
- # Now do the get
- set r [eval {$db get} $txn {$key_set(2)}]
- error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1
-
- # Test case #7.
- puts "\tTest054.b2: Now get the next duplicate from the cursor."
-
- # Now do next on cursor
- set r [$curs get -nextdup]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(2)
- error_check_good curs_get:DB_NEXT:data $d dup_1
-
- # Test case #8.
- puts "\tTest054.b3: Two cursors in set; each deletes a different item"
-
- # Open a new cursor.
- set curs2 [eval {$db cursor} $txn]
- error_check_good curs_open [is_valid_cursor $curs2 $db] TRUE
-
- # Set on last of duplicate set.
- set r [$curs2 get -set $key_set(3)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(3)
- error_check_good curs_get:DB_SET:data $d datum$key_set(3)
-
- set r [$curs2 get -prev]
- error_check_bad cursor_get:DB_PREV [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_PREV:key $k $key_set(2)
- error_check_good curs_get:DB_PREV:data $d dup_5
-
- # Delete the item at cursor 1 (dup_1)
- error_check_good curs1_del [$curs del] 0
-
- # Verify curs1 and curs2
- # current should fail
- set ret [$curs get -current]
- error_check_good curs1_get_after_del $ret ""
-
- set r [$curs2 get -current]
- error_check_bad curs2_get [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
- error_check_good curs_get:DB_CURRENT:data $d dup_5
-
- # Now delete the item at cursor 2 (dup_5)
- error_check_good curs2_del [$curs2 del] 0
-
- # Verify curs1 and curs2
- set ret [$curs get -current]
- error_check_good curs1_get:del2 $ret ""
-
- set ret [$curs2 get -current]
- error_check_good curs2_get:del2 $ret ""
-
- # Now verify that next and prev work.
-
- set r [$curs2 get -prev]
- error_check_bad cursor_get:DB_PREV [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_PREV:key $k $key_set(2)
- error_check_good curs_get:DB_PREV:data $d dup_4
-
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(2)
- error_check_good curs_get:DB_NEXT:data $d dup_2
-
- puts "\tTest054.b4: Two cursors same item, one delete, one get"
-
- # Move curs2 onto dup_2
- set r [$curs2 get -prev]
- error_check_bad cursor_get:DB_PREV [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_PREV:key $k $key_set(2)
- error_check_good curs_get:DB_PREV:data $d dup_3
-
- set r [$curs2 get -prev]
- error_check_bad cursor_get:DB_PREV [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_PREV:key $k $key_set(2)
- error_check_good curs_get:DB_PREV:data $d dup_2
-
- # delete on curs 1
- error_check_good curs1_del [$curs del] 0
-
- # Verify gets on both 1 and 2
- set ret [$curs get -current]
- error_check_good \
- curs1_get:deleted $ret ""
- set ret [$curs2 get -current]
- error_check_good \
- curs2_get:deleted $ret ""
-
- puts "\tTest054.b5: Now do a next on both cursors"
-
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(2)
- error_check_good curs_get:DB_NEXT:data $d dup_3
-
- set r [$curs2 get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(2)
- error_check_good curs_get:DB_NEXT:data $d dup_3
-
- # Close cursor
- error_check_good curs_close [$curs close] 0
- error_check_good curs2_close [$curs2 close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
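
Note for reviewers skimming these deletions: test054.b above pivots on one rule, namely that after a cursor delete of the first item in a duplicate set, a plain db-handle get returns the next surviving duplicate. A minimal sketch of that rule, assuming only that the berkdb Tcl extension is loaded; the path, key, and data below are hypothetical, not taken from the test:

    # Sketch only: assumes the berkdb Tcl extension is loaded and /tmp
    # is writable; "fruit", "apple", "banana" are made-up names.
    set db [berkdb_open -create -mode 0644 -btree -dup /tmp/dupdel.db]
    $db put fruit apple              ;# first item in fruit's dup set
    $db put fruit banana             ;# second item
    set dbc [$db cursor]
    $dbc get -set fruit              ;# cursor now on {fruit apple}
    $dbc del                         ;# delete the item under the cursor
    # A db-handle get skips the deleted item, returning the next dup.
    puts [lindex [lindex [$db get fruit] 0] 1]   ;# prints: banana
    $dbc close
    $db close
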
diff --git a/storage/bdb/test/test055.tcl b/storage/bdb/test/test055.tcl
deleted file mode 100644
index 96bf108c308..00000000000
--- a/storage/bdb/test/test055.tcl
+++ /dev/null
@@ -1,141 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test055.tcl,v 11.18 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test055
-# TEST Basic cursor operations.
-# TEST This test checks basic cursor operations.
-# TEST There are three different scenarios to test:
-# TEST 1. (no dups) Set cursor, retrieve current.
-# TEST 2. (no dups) Set cursor, retrieve prev.
-# TEST 3. (no dups) Set cursor, retrieve next.
-proc test055 { method args } {
- global errorInfo
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test055: $method interspersed cursor and normal operations"
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test055.db
- set env NULL
- } else {
- set testfile test055.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set flags ""
- set txn ""
-
- puts "\tTest055.a: No duplicates"
- set db [eval {berkdb_open -create -mode 0644 $omethod } \
- $args {$testfile}]
- error_check_good db_open:nodup [is_valid_db $db] TRUE
-
- # Put three keys in the database
- for { set key 1 } { $key <= 3 } {incr key} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} $txn $flags {$key datum$key}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Retrieve keys sequentially so we can figure out their order
- set i 1
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
-
- for {set d [$curs get -first] } { [llength $d] != 0 } {\
- set d [$curs get -next] } {
- set key_set($i) [lindex [lindex $d 0] 0]
- incr i
- }
-
- # Test case #1.
- puts "\tTest055.a1: Set cursor, retrieve current"
-
-	# Now set the cursor on the middle one.
- set r [$curs get -set $key_set(2)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(2)
- error_check_good \
- curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
-
- # Now retrieve current
- set r [$curs get -current]
- error_check_bad cursor_get:DB_CURRENT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
- error_check_good \
- curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)]
-
- # Test case #2.
- puts "\tTest055.a2: Set cursor, retrieve previous"
- set r [$curs get -prev]
- error_check_bad cursor_get:DB_PREV [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_PREV:key $k $key_set(1)
- error_check_good \
- curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)]
-
- # Test case #3.
- puts "\tTest055.a2: Set cursor, retrieve next"
-
- # Now set the cursor on the middle one.
- set r [$curs get -set $key_set(2)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(2)
- error_check_good \
- curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
-
- # Now retrieve next
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(3)
- error_check_good \
- curs_get:DB_NEXT:data $d [pad_data $method datum$key_set(3)]
-
- # Close cursor and database.
- error_check_good curs_close [$curs close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
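
The deleted test055 reduces to three cursor motions after a -set: -current must not move the cursor, while -prev and -next step to the neighboring keys. A standalone sketch under the same berkdb assumption, with made-up keys:

    # Sketch only: berkdb Tcl extension assumed; keys are made up.
    set db [berkdb_open -create -mode 0644 -btree /tmp/curs.db]
    foreach k {a b c} { $db put $k datum$k }
    set dbc [$db cursor]
    $dbc get -set b                  ;# position on the middle key
    puts [$dbc get -current]         ;# {b datumb} - position unchanged
    puts [$dbc get -prev]            ;# {a datuma}
    $dbc get -set b                  ;# reposition on the middle key
    puts [$dbc get -next]            ;# {c datumc}
    $dbc close
    $db close
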
diff --git a/storage/bdb/test/test056.tcl b/storage/bdb/test/test056.tcl
deleted file mode 100644
index f689d659230..00000000000
--- a/storage/bdb/test/test056.tcl
+++ /dev/null
@@ -1,169 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test056.tcl,v 11.21 2004/09/20 17:06:16 sue Exp $
-#
-# TEST test056
-# TEST Cursor maintenance during deletes.
-# TEST Check if deleting a key when a cursor is on a duplicate of that
-# TEST key works.
-proc test056 { method args } {
- global errorInfo
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- append args " -create -mode 0644 -dup "
- if { [is_record_based $method] == 1 || [is_rbtree $method] } {
- puts "Test056: skipping for method $method"
- return
- }
- puts "Test056: $method delete of key in presence of cursor"
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test056.db
- set env NULL
- } else {
- set testfile test056.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set flags ""
- set txn ""
-
- set db [eval {berkdb_open} $args {$omethod $testfile}]
- error_check_good db_open:dup [is_valid_db $db] TRUE
-
- puts "\tTest056.a: Key delete with cursor on duplicate."
- # Put three keys in the database
- for { set key 1 } { $key <= 3 } {incr key} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} $txn $flags {$key datum$key}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Retrieve keys sequentially so we can figure out their order
- set i 1
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
-
- for {set d [$curs get -first] } { [llength $d] != 0 } {
- set d [$curs get -next] } {
- set key_set($i) [lindex [lindex $d 0] 0]
- incr i
- }
-
- # Now put in a bunch of duplicates for key 2
- for { set d 1 } { $d <= 5 } {incr d} {
- set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
- error_check_good dup:put $r 0
- }
-
-	# Now set the cursor on the first item of key 2's duplicate set.
- set r [$curs get -set $key_set(2)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(2)
- error_check_good curs_get:DB_SET:data $d datum$key_set(2)
-
- # Now do two nexts
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(2)
- error_check_good curs_get:DB_NEXT:data $d dup_1
-
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(2)
- error_check_good curs_get:DB_NEXT:data $d dup_2
-
- # Now do the delete
- set r [eval {$db del} $txn $flags {$key_set(2)}]
- error_check_good delete $r 0
-
- # Now check the get current on the cursor.
- set ret [$curs get -current]
- error_check_good curs_after_del $ret ""
-
- # Now check that the rest of the database looks intact. There
- # should be only two keys, 1 and 3.
-
- set r [$curs get -first]
- error_check_bad cursor_get:DB_FIRST [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_FIRST:key $k $key_set(1)
- error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
-
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(3)
- error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
-
- set r [$curs get -next]
- error_check_good cursor_get:DB_NEXT [llength $r] 0
-
- puts "\tTest056.b:\
- Cursor delete of first item, followed by cursor FIRST"
- # Set to beginning
- set r [$curs get -first]
- error_check_bad cursor_get:DB_FIRST [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_FIRST:key $k $key_set(1)
- error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
-
- # Now do delete
- error_check_good curs_del [$curs del] 0
-
- # Now do DB_FIRST
- set r [$curs get -first]
- error_check_bad cursor_get:DB_FIRST [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_FIRST:key $k $key_set(3)
- error_check_good curs_get:DB_FIRST:data $d datum$key_set(3)
-
- error_check_good curs_close [$curs close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
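
test056's core assertion: a non-cursor delete of a key removes the whole duplicate set, and a cursor parked on one of those duplicates afterwards returns an empty -current. A sketch under the same assumptions, hypothetical names:

    # Sketch only: berkdb assumed; key and data names are made up.
    set db [berkdb_open -create -mode 0644 -btree -dup /tmp/keydel.db]
    foreach d {one two three} { $db put k $d }
    set dbc [$db cursor]
    $dbc get -set k                  ;# cursor on the first duplicate
    $db del k                        ;# non-cursor delete of the key
    # The cursor's item is gone with the rest of the duplicate set.
    puts [llength [$dbc get -current]]   ;# prints: 0
    $dbc close
    $db close
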
diff --git a/storage/bdb/test/test057.tcl b/storage/bdb/test/test057.tcl
deleted file mode 100644
index 56fe2bf44ec..00000000000
--- a/storage/bdb/test/test057.tcl
+++ /dev/null
@@ -1,201 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test057.tcl,v 11.26 2004/09/20 17:06:16 sue Exp $
-#
-# TEST test057
-# TEST Cursor maintenance during key deletes.
-# TEST 1. Delete a key with a cursor. Add the key back with a regular
-# TEST put. Make sure the cursor can't get the new item.
-# TEST 2. Put two cursors on one item. Delete through one cursor,
-# TEST check that the other sees the change.
-# TEST 3. Same as 2, with the two cursors on a duplicate.
-
-proc test057 { method args } {
- global errorInfo
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- append args " -create -mode 0644 -dup "
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Test057: skipping for method $method"
- return
- }
- puts "Test057: $method delete and replace in presence of cursor."
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test057.db
- set env NULL
- } else {
- set testfile test057.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set flags ""
- set txn ""
-
- set db [eval {berkdb_open} $args {$omethod $testfile}]
- error_check_good dbopen:dup [is_valid_db $db] TRUE
-
- puts "\tTest057.a: Set cursor, delete cursor, put with key."
- # Put three keys in the database
- for { set key 1 } { $key <= 3 } {incr key} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} $txn $flags {$key datum$key}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Retrieve keys sequentially so we can figure out their order
- set i 1
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
-
- for {set d [$curs get -first] } {[llength $d] != 0 } \
- {set d [$curs get -next] } {
- set key_set($i) [lindex [lindex $d 0] 0]
- incr i
- }
-
- # Now put in a bunch of duplicates for key 2
- for { set d 1 } { $d <= 5 } {incr d} {
- set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
- error_check_good dup:put $r 0
- }
-
-	# Now set the cursor on key 1 (which has no duplicates).
- set r [$curs get -set $key_set(1)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(1)
- error_check_good curs_get:DB_SET:data $d datum$key_set(1)
-
- # Now do the delete
- set r [$curs del]
- error_check_good delete $r 0
-
- # Now check the get current on the cursor.
- error_check_good curs_get:del [$curs get -current] ""
-
- # Now do a put on the key
- set r [eval {$db put} $txn $flags {$key_set(1) new_datum$key_set(1)}]
- error_check_good put $r 0
-
- # Do a get
- set r [eval {$db get} $txn {$key_set(1)}]
- error_check_good get [lindex [lindex $r 0] 1] new_datum$key_set(1)
-
- # Recheck cursor
- error_check_good curs_get:deleted [$curs get -current] ""
-
- # Move cursor and see if we get the key.
- set r [$curs get -first]
- error_check_bad cursor_get:DB_FIRST [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_FIRST:key $k $key_set(1)
- error_check_good curs_get:DB_FIRST:data $d new_datum$key_set(1)
-
- puts "\tTest057.b: Set two cursor on a key, delete one, overwrite other"
- set curs2 [eval {$db cursor} $txn]
- error_check_good curs2_open [is_valid_cursor $curs2 $db] TRUE
-
-	# Set both cursors on the third key
- set r [$curs get -set $key_set(3)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(3)
- error_check_good curs_get:DB_SET:data $d datum$key_set(3)
-
- set r [$curs2 get -set $key_set(3)]
- error_check_bad cursor2_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs2_get:DB_SET:key $k $key_set(3)
- error_check_good curs2_get:DB_SET:data $d datum$key_set(3)
-
- # Now delete through cursor 1
- error_check_good curs1_del [$curs del] 0
-
- # Verify gets on both 1 and 2
- error_check_good curs_get:deleted [$curs get -current] ""
- error_check_good curs_get:deleted [$curs2 get -current] ""
-
- puts "\tTest057.c:\
- Set two cursors on a dup, delete one, overwrite other"
-
- # Set both cursors on the 2nd duplicate of key 2
- set r [$curs get -set $key_set(2)]
- error_check_bad cursor_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_SET:key $k $key_set(2)
- error_check_good curs_get:DB_SET:data $d datum$key_set(2)
-
- set r [$curs get -next]
- error_check_bad cursor_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs_get:DB_NEXT:key $k $key_set(2)
- error_check_good curs_get:DB_NEXT:data $d dup_1
-
- set r [$curs2 get -set $key_set(2)]
- error_check_bad cursor2_get:DB_SET [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs2_get:DB_SET:key $k $key_set(2)
- error_check_good curs2_get:DB_SET:data $d datum$key_set(2)
-
- set r [$curs2 get -next]
- error_check_bad cursor2_get:DB_NEXT [llength $r] 0
- set k [lindex [lindex $r 0] 0]
- set d [lindex [lindex $r 0] 1]
- error_check_good curs2_get:DB_NEXT:key $k $key_set(2)
- error_check_good curs2_get:DB_NEXT:data $d dup_1
-
- # Now delete through cursor 1
- error_check_good curs1_del [$curs del] 0
-
- # Verify gets on both 1 and 2
- error_check_good curs_get:deleted [$curs get -current] ""
- error_check_good curs_get:deleted [$curs2 get -current] ""
-
- error_check_good curs2_close [$curs2 close] 0
- error_check_good curs_close [$curs close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
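
test057's recurring check, distilled: when two cursors sit on the same item and one deletes it, both must report an empty -current. A sketch, same berkdb assumption, made-up names:

    # Sketch only: berkdb assumed; key and data are made up.
    set db [berkdb_open -create -mode 0644 -btree /tmp/twocurs.db]
    $db put k v
    set c1 [$db cursor]
    set c2 [$db cursor]
    $c1 get -set k
    $c2 get -set k                   ;# both cursors on the same item
    $c1 del                          ;# delete through the first cursor
    puts [llength [$c1 get -current]]    ;# prints: 0
    puts [llength [$c2 get -current]]    ;# prints: 0 - c2 sees it too
    $c1 close
    $c2 close
    $db close
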
diff --git a/storage/bdb/test/test058.tcl b/storage/bdb/test/test058.tcl
deleted file mode 100644
index 4213e7279b4..00000000000
--- a/storage/bdb/test/test058.tcl
+++ /dev/null
@@ -1,103 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test058.tcl,v 11.22 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test058
-# TEST Verify that deleting and reading duplicates results in correct ordering.
-proc test058 { method args } {
- source ./include.tcl
-
- #
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test058 skipping for env $env"
- return
- }
- set args [convert_args $method $args]
- set encargs ""
- set args [split_encargs $args encargs]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Test058: skipping for method $method"
- return
- }
- puts "Test058: $method delete dups after inserting after duped key."
-
- # environment
- env_cleanup $testdir
- set eflags "-create -txn $encargs -home $testdir"
- set env [eval {berkdb_env} $eflags]
- error_check_good env [is_valid_env $env] TRUE
-
- # db open
- set flags "-auto_commit -create -mode 0644 -dup -env $env $args"
- set db [eval {berkdb_open} $flags $omethod "test058.db"]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set tn ""
- set tid ""
- set tn [$env txn]
- set tflags "-txn $tn"
-
- puts "\tTest058.a: Adding 10 duplicates"
- # Add a bunch of dups
- for { set i 0 } { $i < 10 } {incr i} {
- set ret \
- [eval {$db put} $tflags {doghouse $i"DUPLICATE_DATA_VALUE"}]
- error_check_good db_put $ret 0
- }
-
- puts "\tTest058.b: Adding key after duplicates"
- # Now add one more key/data AFTER the dup set.
- set ret [eval {$db put} $tflags {zebrahouse NOT_A_DUP}]
- error_check_good db_put $ret 0
-
- error_check_good txn_commit [$tn commit] 0
-
- set tn [$env txn]
- error_check_good txnbegin [is_substr $tn $env] 1
- set tflags "-txn $tn"
-
- # Now delete everything
- puts "\tTest058.c: Deleting duplicated key"
- set ret [eval {$db del} $tflags {doghouse}]
- error_check_good del $ret 0
-
- # Now reput everything
- set pad \
- abcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuv
-
- puts "\tTest058.d: Reputting duplicates with big data vals"
- for { set i 0 } { $i < 10 } {incr i} {
- set ret [eval {$db put} \
- $tflags {doghouse $i"DUPLICATE_DATA_VALUE"$pad}]
- error_check_good db_put $ret 0
- }
- error_check_good txn_commit [$tn commit] 0
-
- # Check duplicates for order
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
- puts "\tTest058.e: Verifying that duplicates are in order."
- set i 0
- for { set ret [$dbc get -set doghouse] } \
- {$i < 10 && [llength $ret] != 0} \
- { set ret [$dbc get -nextdup] } {
- set data [lindex [lindex $ret 0] 1]
- error_check_good \
- duplicate_value $data $i"DUPLICATE_DATA_VALUE"$pad
- incr i
- }
-
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
- reset_env $env
-}
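
test058 verifies that dropping a duplicate set and re-inserting it preserves insertion order for unsorted duplicates. A cut-down sketch (no transactions, small values), same assumptions:

    # Sketch only: berkdb assumed; "house" and the values are made up.
    set db [berkdb_open -create -mode 0644 -btree -dup /tmp/duporder.db]
    for {set i 0} {$i < 5} {incr i} { $db put house $i:val }
    $db del house                    ;# drop the whole duplicate set
    for {set i 0} {$i < 5} {incr i} { $db put house $i:val }
    # Unsorted dups must come back in insertion order: 0:val .. 4:val.
    set dbc [$db cursor]
    for {set r [$dbc get -set house]} {[llength $r] != 0} \
        {set r [$dbc get -nextdup]} {
        puts [lindex [lindex $r 0] 1]
    }
    $dbc close
    $db close
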
diff --git a/storage/bdb/test/test059.tcl b/storage/bdb/test/test059.tcl
deleted file mode 100644
index 887ce6d94db..00000000000
--- a/storage/bdb/test/test059.tcl
+++ /dev/null
@@ -1,150 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test059.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test059
-# TEST Cursor ops work with a partial length of 0.
-# TEST Make sure that we handle retrieves of zero-length data items correctly.
-# TEST The following ops should allow a partial data retrieve of 0-length.
-# TEST db_get
-# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
-proc test059 { method args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test059: $method 0-length partial data retrieval"
-
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test059.db
- set env NULL
- } else {
- set testfile test059.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
-
- puts "\tTest059.a: Populate a database"
- set oflags "-create -mode 0644 $omethod $args $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_create [is_substr $db db] 1
-
- # Put ten keys in the database
- for { set key 1 } { $key <= 10 } {incr key} {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} $txn $pflags {$key datum$key}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Retrieve keys sequentially so we can figure out their order
- set i 1
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set curs [eval {$db cursor} $txn]
- error_check_good db_curs [is_valid_cursor $curs $db] TRUE
-
- for {set d [$curs get -first] } { [llength $d] != 0 } {
- set d [$curs get -next] } {
- set key_set($i) [lindex [lindex $d 0] 0]
- incr i
- }
-
- puts "\tTest059.a: db get with 0 partial length retrieve"
-
- # Now set the cursor on the middle one.
- set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
- error_check_bad db_get_0 [llength $ret] 0
-
- puts "\tTest059.a: db cget FIRST with 0 partial length retrieve"
- set ret [$curs get -first -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_first $key $key_set(1)
- error_check_good db_cget_first [string length $data] 0
-
- puts "\tTest059.b: db cget NEXT with 0 partial length retrieve"
- set ret [$curs get -next -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_next $key $key_set(2)
- error_check_good db_cget_next [string length $data] 0
-
- puts "\tTest059.c: db cget LAST with 0 partial length retrieve"
- set ret [$curs get -last -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_last $key $key_set(10)
- error_check_good db_cget_last [string length $data] 0
-
- puts "\tTest059.d: db cget PREV with 0 partial length retrieve"
- set ret [$curs get -prev -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_prev $key $key_set(9)
- error_check_good db_cget_prev [string length $data] 0
-
- puts "\tTest059.e: db cget CURRENT with 0 partial length retrieve"
- set ret [$curs get -current -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_current $key $key_set(9)
- error_check_good db_cget_current [string length $data] 0
-
- puts "\tTest059.f: db cget SET with 0 partial length retrieve"
- set ret [$curs get -set -partial {0 0} $key_set(7)]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_set $key $key_set(7)
- error_check_good db_cget_set [string length $data] 0
-
- if {[is_btree $method] == 1} {
- puts "\tTest059.g:\
- db cget SET_RANGE with 0 partial length retrieve"
- set ret [$curs get -set_range -partial {0 0} $key_set(5)]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_set $key $key_set(5)
- error_check_good db_cget_set [string length $data] 0
- }
-
- error_check_good curs_close [$curs close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
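
test059 hinges on the -partial {0 0} spec: a zero-offset, zero-length partial retrieve must succeed and return empty data through both the db handle and a cursor. Sketch, same assumptions:

    # Sketch only: berkdb assumed; the key and data are made up.
    set db [berkdb_open -create -mode 0644 -btree /tmp/partial.db]
    $db put k somedata
    # A {0 0} partial spec (offset 0, length 0) must succeed and hand
    # back zero-length data, via both the db handle and a cursor.
    set r [$db get -partial {0 0} k]
    puts [string length [lindex [lindex $r 0] 1]]   ;# prints: 0
    set dbc [$db cursor]
    set r [$dbc get -first -partial {0 0}]
    puts [string length [lindex [lindex $r 0] 1]]   ;# prints: 0
    $dbc close
    $db close
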
diff --git a/storage/bdb/test/test060.tcl b/storage/bdb/test/test060.tcl
deleted file mode 100644
index 770fb5d052f..00000000000
--- a/storage/bdb/test/test060.tcl
+++ /dev/null
@@ -1,60 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test060.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test060
-# TEST Test of the DB_EXCL flag to DB->open().
-# TEST 1) Attempt to open and create a nonexistent database; verify success.
-# TEST 2) Attempt to reopen it; verify failure.
-proc test060 { method args } {
- global errorCode
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open"
-
- # Set the database location and make sure the db doesn't exist yet
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test060.db
- set env NULL
- } else {
- set testfile test060.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- # Create the database and check success
- puts "\tTest060.a: open and close non-existent file with DB_EXCL"
- set db [eval {berkdb_open \
- -create -excl -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen:excl [is_valid_db $db] TRUE
-
- # Close it and check success
- error_check_good db_close [$db close] 0
-
- # Try to open it again, and make sure the open fails
- puts "\tTest060.b: open it again with DB_EXCL and make sure it fails"
- set errorCode NONE
- error_check_good open:excl:catch [catch { \
- set db [eval {berkdb_open_noerr \
- -create -excl -mode 0644} $args {$omethod $testfile}]
- } ret ] 1
-
- error_check_good dbopen:excl [is_substr $errorCode EEXIST] 1
-}
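
test060 is the Tcl face of DB_EXCL: the first -create -excl open succeeds, a second one must fail with EEXIST. Sketch, same assumptions, hypothetical path:

    # Sketch only: berkdb assumed; the path is made up.
    set file /tmp/excl.db
    set db [berkdb_open -create -excl -mode 0644 -btree $file]
    $db close
    # A second -create -excl open of an existing file must fail.
    set rc [catch {berkdb_open_noerr \
        -create -excl -mode 0644 -btree $file} msg]
    puts "catch=$rc: $msg"           ;# rc is 1; msg mentions EEXIST
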
diff --git a/storage/bdb/test/test061.tcl b/storage/bdb/test/test061.tcl
deleted file mode 100644
index f4b12c45df5..00000000000
--- a/storage/bdb/test/test061.tcl
+++ /dev/null
@@ -1,226 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test061.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test061
-# TEST Test of txn abort and commit for in-memory databases.
-# TEST a) Put + abort: verify absence of data
-# TEST b) Put + commit: verify presence of data
-# TEST c) Overwrite + abort: verify that data is unchanged
-# TEST d) Overwrite + commit: verify that data has changed
-# TEST e) Delete + abort: verify that data is still present
-# TEST f) Delete + commit: verify that data has been deleted
-proc test061 { method args } {
- global alphabet
- global encrypt
- global errorCode
- global passwd
- source ./include.tcl
-
- #
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test061 skipping for env $env"
- return
- }
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- if { [is_queueext $method] == 1} {
- puts "Test061 skipping for method $method"
- return
- }
- set encargs ""
- set args [split_encargs $args encargs]
-
- puts "Test061: Transaction abort and commit test for in-memory data."
- puts "Test061: $method $args"
-
- set key "key"
- set data "data"
- set otherdata "otherdata"
- set txn ""
- set flags ""
- set gflags ""
-
- if { [is_record_based $method] == 1} {
- set key 1
- set gflags " -recno"
- }
-
- puts "\tTest061: Create environment and $method database."
- env_cleanup $testdir
-
- # create environment
- set eflags "-create -txn $encargs -home $testdir"
- set dbenv [eval {berkdb_env} $eflags]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- # db open -- no file specified, in-memory database
- set flags "-auto_commit -create $args $omethod"
- set db [eval {berkdb_open -env} $dbenv $flags]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Here we go with the six test cases. Since we need to verify
- # a different thing each time, and since we can't just reuse
- # the same data if we're to test overwrite, we just
- # plow through rather than writing some impenetrable loop code;
- # each of the cases is only a few lines long, anyway.
-
- puts "\tTest061.a: put/abort"
-
- # txn_begin
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
-
- # put a key
- set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
- error_check_good db_put $ret 0
-
- # check for existence
- set ret [eval {$db get} -txn $txn $gflags {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $data]]]
-
- # abort
- error_check_good txn_abort [$txn abort] 0
-
- # check for *non*-existence
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret {}
-
- puts "\tTest061.b: put/commit"
-
- # txn_begin
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
-
- # put a key
- set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
- error_check_good db_put $ret 0
-
- # check for existence
- set ret [eval {$db get} -txn $txn $gflags {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $data]]]
-
- # commit
- error_check_good txn_commit [$txn commit] 0
-
- # check again for existence
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $data]]]
-
- puts "\tTest061.c: overwrite/abort"
-
- # txn_begin
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
-
- # overwrite {key,data} with {key,otherdata}
- set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
- error_check_good db_put $ret 0
-
- # check for existence
- set ret [eval {$db get} -txn $txn $gflags {$key}]
- error_check_good get $ret \
- [list [list $key [pad_data $method $otherdata]]]
-
- # abort
- error_check_good txn_abort [$txn abort] 0
-
- # check that data is unchanged ($data not $otherdata)
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $data]]]
-
- puts "\tTest061.d: overwrite/commit"
-
- # txn_begin
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
-
- # overwrite {key,data} with {key,otherdata}
- set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
- error_check_good db_put $ret 0
-
- # check for existence
- set ret [eval {$db get} -txn $txn $gflags {$key}]
- error_check_good get $ret \
- [list [list $key [pad_data $method $otherdata]]]
-
- # commit
- error_check_good txn_commit [$txn commit] 0
-
- # check that data has changed ($otherdata not $data)
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret \
- [list [list $key [pad_data $method $otherdata]]]
-
- puts "\tTest061.e: delete/abort"
-
- # txn_begin
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
-
- # delete
- set ret [eval {$db del} -txn $txn {$key}]
-	error_check_good db_del $ret 0
-
- # check for nonexistence
- set ret [eval {$db get} -txn $txn $gflags {$key}]
- error_check_good get $ret {}
-
- # abort
- error_check_good txn_abort [$txn abort] 0
-
- # check for existence
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret \
- [list [list $key [pad_data $method $otherdata]]]
-
- puts "\tTest061.f: delete/commit"
-
- # txn_begin
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
-
-	# delete the key
-	set ret [eval {$db del} -txn $txn {$key}]
-	error_check_good db_del $ret 0
-
- # check for nonexistence
- set ret [eval {$db get} -txn $txn $gflags {$key}]
- error_check_good get $ret {}
-
- # commit
- error_check_good txn_commit [$txn commit] 0
-
- # check for continued nonexistence
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret {}
-
- # We're done; clean up.
- error_check_good db_close [eval {$db close}] 0
- error_check_good env_close [eval {$dbenv close}] 0
-
- # Now run db_recover and ensure that it runs cleanly.
- set utilflag ""
- if { $encrypt != 0 } {
- set utilflag "-P $passwd"
- }
- puts "\tTest061.g: Running db_recover -h"
- set ret [catch {eval {exec} $util_path/db_recover -h $testdir \
- $utilflag} res]
- if { $ret != 0 } {
- puts "FAIL: db_recover outputted $res"
- }
- error_check_good db_recover $ret 0
-
- puts "\tTest061.h: Running db_recover -c -h"
- set ret [catch {eval {exec} $util_path/db_recover -c -h $testdir \
- $utilflag} res]
- error_check_good db_recover-c $ret 0
-}
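
test061's six cases all reduce to transaction visibility: aborted writes vanish, committed ones persist. A two-case sketch, assuming the berkdb extension and an already-existing home directory (the path is hypothetical):

    # Sketch only: the home directory must already exist; key/data
    # names are made up.  No file name means an in-memory database.
    set dbenv [berkdb_env -create -txn -home /tmp/txnhome]
    set db [berkdb_open -env $dbenv -auto_commit -create -btree]
    set t [$dbenv txn]
    $db put -txn $t k v
    $t abort                         ;# roll the put back
    puts [llength [$db get k]]       ;# prints: 0 - nothing survived
    set t [$dbenv txn]
    $db put -txn $t k v
    $t commit                        ;# make the put durable
    puts [lindex [lindex [$db get k] 0] 1]   ;# prints: v
    $db close
    $dbenv close
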
diff --git a/storage/bdb/test/test062.tcl b/storage/bdb/test/test062.tcl
deleted file mode 100644
index f26c66d6a1c..00000000000
--- a/storage/bdb/test/test062.tcl
+++ /dev/null
@@ -1,153 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test062.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test062
-# TEST Test of partial puts (using DB_CURRENT) onto duplicate pages.
-# TEST Insert the first 200 words from the dictionary 200 times each, with
-# TEST self as key and <random letter>:self as data. Use partial puts to
-# TEST append self again to data; verify correctness.
-proc test062 { method {nentries 200} {ndups 200} {tnum "062"} args } {
- global alphabet
- global rand_init
- source ./include.tcl
-
- berkdb srand $rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $omethod"
- return
- }
- # Create the database and open the dictionary
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 200 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- puts "Test$tnum:\
- $method ($args) $nentries Partial puts and $ndups duplicates."
- set db [eval {berkdb_open -create -mode 0644 \
- $omethod -dup} $args {$testfile} ]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put each key/data pair
- puts "\tTest$tnum.a: Put loop (initialize database)"
- while { [gets $did str] != -1 && $count < $nentries } {
- for { set i 1 } { $i <= $ndups } { incr i } {
- set pref \
- [string index $alphabet [berkdb random_int 0 25]]
- set datastr $pref:$str
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- set keys($count) $str
-
- incr count
- }
- close $did
-
- puts "\tTest$tnum.b: Partial puts."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
-
- # Do a partial write to extend each datum in
- # the regular db by the corresponding dictionary word.
- # We have to go through each key's dup set using -set
- # because cursors are not stable in the hash AM and we
- # want to make sure we hit all the keys.
- for { set i 0 } { $i < $count } { incr i } {
- set key $keys($i)
- for {set ret [$dbc get -set $key]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -nextdup]} {
-
- set k [lindex [lindex $ret 0] 0]
- set orig_d [lindex [lindex $ret 0] 1]
- set d [string range $orig_d 2 end]
- set doff [expr [string length $d] + 2]
- set dlen 0
- error_check_good data_and_key_sanity $d $k
-
- set ret [$dbc get -current]
- error_check_good before_sanity \
- [lindex [lindex $ret 0] 0] \
- [string range [lindex [lindex $ret 0] 1] 2 end]
-
- error_check_good partial_put [eval {$dbc put -current \
- -partial [list $doff $dlen] $d}] 0
-
- set ret [$dbc get -current]
- error_check_good partial_put_correct \
- [lindex [lindex $ret 0] 1] $orig_d$d
- }
- }
-
- puts "\tTest$tnum.c: Double-checking get loop."
- # Double-check that each datum in the regular db has
- # been appropriately modified.
-
- for {set ret [$dbc get -first]} \
- {[llength $ret] != 0} \
- {set ret [$dbc get -next]} {
-
- set k [lindex [lindex $ret 0] 0]
- set d [lindex [lindex $ret 0] 1]
- error_check_good modification_correct \
- [string range $d 2 end] [repeat $k 2]
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
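
test062's partial-put trick is worth seeing in isolation: appending means putting at an offset equal to the current data length with a replace length of 0. Sketch, same assumptions:

    # Sketch only: berkdb assumed; key and data are made up.
    set db [berkdb_open -create -mode 0644 -btree /tmp/append.db]
    $db put word x:word
    set dbc [$db cursor]
    set d [lindex [lindex [$dbc get -set word] 0] 1]   ;# "x:word"
    # Append by putting at offset = current length, replacing 0 bytes.
    $dbc put -current -partial [list [string length $d] 0] more
    puts [lindex [lindex [$dbc get -current] 0] 1]     ;# x:wordmore
    $dbc close
    $db close
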
diff --git a/storage/bdb/test/test063.tcl b/storage/bdb/test/test063.tcl
deleted file mode 100644
index 8918fa4fa00..00000000000
--- a/storage/bdb/test/test063.tcl
+++ /dev/null
@@ -1,174 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test063.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test063
-# TEST Test of the DB_RDONLY flag to DB->open
-# TEST Attempt to both DB->put and DBC->c_put into a database
-# TEST that has been opened DB_RDONLY, and check for failure.
-proc test063 { method args } {
- global errorCode
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- set tnum "063"
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set key "key"
- set data "data"
- set key2 "another_key"
- set data2 "more_data"
-
- set gflags ""
- set txn ""
-
- if { [is_record_based $method] == 1 } {
- set key "1"
- set key2 "2"
- append gflags " -recno"
- }
-
- puts "Test$tnum: $method ($args) DB_RDONLY test."
-
- # Create a test database.
- puts "\tTest$tnum.a: Creating test database."
- set db [eval {berkdb_open_noerr -create -mode 0644} \
- $omethod $args $testfile]
- error_check_good db_create [is_valid_db $db] TRUE
-
- # Put and get an item so it's nonempty.
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
- error_check_good initial_put $ret 0
-
- set dbt [eval {$db get} $txn $gflags {$key}]
- error_check_good initial_get $dbt \
- [list [list $key [pad_data $method $data]]]
-
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- if { $eindex == -1 } {
- # Confirm that database is writable. If we are
- # using an env (that may be remote on a server)
- # we cannot do this check.
- error_check_good writable [file writable $testfile] 1
- }
-
- puts "\tTest$tnum.b: Re-opening DB_RDONLY and attempting to put."
-
- # Now open it read-only and make sure we can get but not put.
- set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbt [eval {$db get} $txn $gflags {$key}]
- error_check_good db_get $dbt \
- [list [list $key [pad_data $method $data]]]
-
- set ret [catch {eval {$db put} $txn \
- {$key2 [chop_data $method $data]}} res]
- error_check_good put_failed $ret 1
- error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set errorCode "NONE"
-
- puts "\tTest$tnum.c: Attempting cursor put."
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
-
- error_check_good cursor_set [$dbc get -first] $dbt
- set ret [catch {eval {$dbc put} -current $data} res]
- error_check_good c_put_failed $ret 1
- error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1
-
- set dbt [eval {$db get} $gflags {$key2}]
- error_check_good db_get_key2 $dbt ""
-
- puts "\tTest$tnum.d: Attempting ordinary delete."
-
- set errorCode "NONE"
-	set ret [catch {eval {$db del} $txn {$key}} res]
- error_check_good del_failed $ret 1
- error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1
-
- set dbt [eval {$db get} $txn $gflags {$key}]
- error_check_good db_get_key $dbt \
- [list [list $key [pad_data $method $data]]]
-
- puts "\tTest$tnum.e: Attempting cursor delete."
- # Just set the cursor to the beginning; we don't care what's there...
- # yet.
- set dbt2 [$dbc get -first]
- error_check_good db_get_first_key $dbt2 $dbt
- set errorCode "NONE"
- set ret [catch {$dbc del} res]
- error_check_good c_del_failed $ret 1
- error_check_good dbc_del_rdonly [is_substr $errorCode "EACCES"] 1
-
- set dbt2 [$dbc get -current]
- error_check_good db_get_key $dbt2 $dbt
-
- puts "\tTest$tnum.f: Close, reopen db; verify unchanged."
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- set db [eval {berkdb_open} $omethod $args $testfile]
- error_check_good db_reopen [is_valid_db $db] TRUE
-
- set dbc [$db cursor]
- error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
-
- error_check_good first_there [$dbc get -first] \
- [list [list $key [pad_data $method $data]]]
- error_check_good nomore_there [$dbc get -next] ""
-
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
-}
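
test063 in miniature: a database reopened with -rdonly still serves gets, while any put fails and leaves EACCES in errorCode. Sketch, same assumptions:

    # Sketch only: berkdb assumed; file and keys are made up.
    set file /tmp/rdonly.db
    set db [berkdb_open -create -mode 0644 -btree $file]
    $db put k v
    $db close
    set db [berkdb_open_noerr -rdonly $file]
    puts [$db get k]                 ;# reads still work: {k v}
    # Any write must fail and leave EACCES in errorCode.
    set rc [catch {$db put k2 v2} msg]
    puts "catch=$rc errorCode=$errorCode"
    $db close
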
diff --git a/storage/bdb/test/test064.tcl b/storage/bdb/test/test064.tcl
deleted file mode 100644
index 13a64657463..00000000000
--- a/storage/bdb/test/test064.tcl
+++ /dev/null
@@ -1,69 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test064.tcl,v 11.16 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test064
-# TEST Test of DB->get_type
-# TEST Create a database of type specified by method.
-# TEST Make sure DB->get_type returns the right thing with both a normal
-# TEST and DB_UNKNOWN open.
-proc test064 { method args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- set tnum "064"
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- puts "Test$tnum: $method ($args) DB->get_type test."
-
- # Create a test database.
- puts "\tTest$tnum.a: Creating test database of type $method."
- set db [eval {berkdb_open -create -mode 0644} \
- $omethod $args $testfile]
- error_check_good db_create [is_valid_db $db] TRUE
-
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum.b: get_type after method specifier."
-
- set db [eval {berkdb_open} $omethod $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set type [$db get_type]
- error_check_good get_type $type [string range $omethod 1 end]
-
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum.c: get_type after DB_UNKNOWN."
-
- set db [eval {berkdb_open} $args $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set type [$db get_type]
- error_check_good get_type $type [string range $omethod 1 end]
-
- error_check_good db_close [$db close] 0
-}
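
test064's point: when a database is reopened without a method flag (DB_UNKNOWN), the access method is read from the file itself, and get_type must still report it. Sketch, same assumptions:

    # Sketch only: berkdb assumed; the path is made up.
    set file /tmp/gettype.db
    set db [berkdb_open -create -mode 0644 -btree $file]
    $db close
    # Reopen with no method flag (DB_UNKNOWN): the type comes from
    # the file, and get_type must still report it.
    set db [berkdb_open $file]
    puts [$db get_type]              ;# prints: btree
    $db close
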
diff --git a/storage/bdb/test/test065.tcl b/storage/bdb/test/test065.tcl
deleted file mode 100644
index cb29b106d24..00000000000
--- a/storage/bdb/test/test065.tcl
+++ /dev/null
@@ -1,208 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test065.tcl,v 11.22 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test065
-# TEST Test of DB->stat, both with DB_FAST_STAT and with row
-# TEST counts via DB->stat -txn.
-proc test065 { method args } {
- source ./include.tcl
- global errorCode
- global alphabet
-
- set nentries 10000
- set args [convert_args $method $args]
- set omethod [convert_method $method]
- set tnum "065"
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- puts "Test$tnum: $method ($args) DB->stat(DB_FAST_STAT) test."
-
- puts "\tTest$tnum.a: Create database and check it while empty."
-
- set db [eval {berkdb_open_noerr -create -mode 0644} \
- $omethod $args $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set ret [catch {eval $db stat -faststat} res]
-
- error_check_good db_close [$db close] 0
-
- if { ([is_record_based $method] && ![is_queue $method]) \
- || [is_rbtree $method] } {
- error_check_good recordcount_ok [is_substr $res \
- "{{Number of keys} 0}"] 1
- } else {
- puts "\tTest$tnum: Test complete for method $method."
- return
- }
-
- # If we've got this far, we're on an access method for
-	# which record counts make sense. Thus, we no longer
- # catch EINVALs, and no longer care about __db_errs.
- set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile]
-
- puts "\tTest$tnum.b: put $nentries keys."
-
- if { [is_record_based $method] } {
- set gflags " -recno "
- set keypfx ""
- } else {
- set gflags ""
- set keypfx "key"
- }
-
- set txn ""
- set data [pad_data $method $alphabet]
-
- for { set ndx 1 } { $ndx <= $nentries } { incr ndx } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$keypfx$ndx $data}]
- error_check_good db_put $ret 0
- set statret [eval {$db stat} $txn]
- set rowcount [getstats $statret "Number of records"]
- error_check_good rowcount $rowcount $ndx
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- set ret [$db stat -faststat]
- error_check_good recordcount_after_puts \
- [is_substr $ret "{{Number of keys} $nentries}"] 1
-
- puts "\tTest$tnum.c: delete 90% of keys."
- set end [expr {$nentries / 10 * 9}]
- for { set ndx 1 } { $ndx <= $end } { incr ndx } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- if { [is_rrecno $method] == 1 } {
-			# If we're renumbering, each delete shifts the
-			# remaining keys down, so key $ndx eventually won't
-			# exist and we'd croak.  Delete key 1 repeatedly.
- set ret [eval {$db del} $txn {[concat $keypfx 1]}]
- set statret [eval {$db stat} $txn]
- set rowcount [getstats $statret "Number of records"]
- error_check_good rowcount $rowcount [expr $nentries - $ndx]
- } else {
- set ret [eval {$db del} $txn {$keypfx$ndx}]
- set rowcount [getstats $statret "Number of records"]
- error_check_good rowcount $rowcount $nentries
- }
- error_check_good db_del $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- set ret [$db stat -faststat]
- if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } {
- # We allow renumbering--thus the stat should return 10%
- # of nentries.
- error_check_good recordcount_after_dels [is_substr $ret \
- "{{Number of keys} [expr {$nentries / 10}]}"] 1
- } else {
- # No renumbering--no change in RECORDCOUNT!
- error_check_good recordcount_after_dels \
- [is_substr $ret "{{Number of keys} $nentries}"] 1
- }
-
- puts "\tTest$tnum.d: put new keys at the beginning."
- set end [expr {$nentries / 10 * 8}]
- for { set ndx 1 } { $ndx <= $end } {incr ndx } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$keypfx$ndx $data}]
- error_check_good db_put_beginning $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- set ret [$db stat -faststat]
- if { [is_rrecno $method] == 1 } {
- # With renumbering we're back up to 80% of $nentries
- error_check_good recordcount_after_dels [is_substr $ret \
- "{{Number of keys} [expr {$nentries / 10 * 8}]}"] 1
- } elseif { [is_rbtree $method] == 1 } {
- # Total records in a btree is now 90% of $nentries
- error_check_good recordcount_after_dels [is_substr $ret \
- "{{Number of keys} [expr {$nentries / 10 * 9}]}"] 1
- } else {
- # No renumbering--still no change in RECORDCOUNT.
- error_check_good recordcount_after_dels [is_substr $ret \
- "{{Number of keys} $nentries}"] 1
- }
-
- puts "\tTest$tnum.e: put new keys at the end."
- set start [expr {1 + $nentries / 10 * 9}]
- set end [expr {($nentries / 10 * 9) + ($nentries / 10 * 8)}]
- for { set ndx $start } { $ndx <= $end } { incr ndx } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$keypfx$ndx $data}]
- error_check_good db_put_end $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- set ret [$db stat -faststat]
- if { [is_rbtree $method] != 1 } {
- # If this is a recno database, the record count should be up
- # to (1.7 x nentries), the largest number we've seen, with
- # or without renumbering.
- error_check_good recordcount_after_puts2 [is_substr $ret \
- "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 8}]}"] 1
- } else {
- # In an rbtree, 1000 of those keys were overwrites, so there
- # are (.7 x nentries) new keys and (.9 x nentries) old keys
- # for a total of (1.6 x nentries).
- error_check_good recordcount_after_puts2 [is_substr $ret \
- "{{Number of keys} [expr {$start -1 + $nentries / 10 * 7}]}"] 1
- }
-
- error_check_good db_close [$db close] 0
-}
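
test065 leans on the shape of the stat return value, a list of {description value} pairs. A sketch of pulling the key count out of -faststat, same assumptions:

    # Sketch only: berkdb assumed; keys are made up.
    set db [berkdb_open -create -mode 0644 -btree /tmp/stat.db]
    for {set i 1} {$i <= 10} {incr i} { $db put key$i v }
    # stat returns {description value} pairs; pull out the key count.
    foreach pair [$db stat -faststat] {
        if {[lindex $pair 0] eq "Number of keys"} {
            puts "keys: [lindex $pair 1]"   ;# keys: 10
        }
    }
    $db close
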
diff --git a/storage/bdb/test/test066.tcl b/storage/bdb/test/test066.tcl
deleted file mode 100644
index 6f31209071a..00000000000
--- a/storage/bdb/test/test066.tcl
+++ /dev/null
@@ -1,99 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test066.tcl,v 11.15 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test066
-# TEST Test of cursor overwrites of DB_CURRENT w/ duplicates.
-# TEST
-# TEST Make sure a cursor put to DB_CURRENT acts as an overwrite in a
-# TEST database with duplicates.
-proc test066 { method args } {
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- set tnum "066"
-
- if { [is_record_based $method] || [is_rbtree $method] } {
- puts "Test$tnum: Skipping for method $method."
- return
- }
-
- puts "Test$tnum: Test of cursor put to DB_CURRENT with duplicates."
-
- source ./include.tcl
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test066.db
- set env NULL
- } else {
- set testfile test066.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set txn ""
- set key "test"
- set data "olddata"
-
- set db [eval {berkdb_open -create -mode 0644 -dup} $omethod $args \
- $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
- error_check_good db_put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- set ret [$dbc get -first]
- error_check_good db_get $ret [list [list $key [pad_data $method $data]]]
-
- set newdata "newdata"
- set ret [$dbc put -current [chop_data $method $newdata]]
- error_check_good dbc_put $ret 0
-
- # There should be only one (key,data) pair in the database, and this
- # is it.
- set ret [$dbc get -first]
- error_check_good db_get_first $ret \
- [list [list $key [pad_data $method $newdata]]]
-
- # and this one should come up empty.
- set ret [$dbc get -next]
- error_check_good db_get_next $ret ""
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum: Test completed successfully."
-}
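The property test066 pins down, that a DB_CURRENT put overwrites rather than adds a duplicate, fits in a few lines. A hedged sketch, assuming the berkdb extension; overwrite.db is an arbitrary file name.

    # Sketch: -current overwrites the item under the cursor in place,
    # even in a -dup database; no new duplicate is created.
    set db [berkdb_open -create -mode 0644 -dup -btree overwrite.db]
    $db put test olddata
    set dbc [$db cursor]
    $dbc get -first                  ;# position on {test olddata}
    $dbc put -current newdata        ;# overwrite, don't append
    puts [$db get test]              ;# expected: {{test newdata}}
    $dbc close
    $db close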
diff --git a/storage/bdb/test/test067.tcl b/storage/bdb/test/test067.tcl
deleted file mode 100644
index 710c6b9c201..00000000000
--- a/storage/bdb/test/test067.tcl
+++ /dev/null
@@ -1,162 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test067.tcl,v 11.25 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test067
-# TEST Test of DB_CURRENT partial puts onto almost empty duplicate
-# TEST pages, with and without DB_DUP_SORT.
-# TEST
-# TEST Test of DB_CURRENT partial puts on almost-empty duplicate pages.
-# TEST This test was written to address the following issue, #2 in the
-# TEST list of issues relating to bug #0820:
-# TEST
-# TEST 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
-# TEST In Btree, the DB_CURRENT overwrite of off-page duplicate records
-# TEST first deletes the record and then puts the new one -- this could
-# TEST be a problem if the removal of the record causes a reverse split.
-# TEST Suggested solution is to acquire a cursor to lock down the current
-# TEST record, put a new record after that record, and then delete using
-# TEST the held cursor.
-# TEST
-# TEST It also tests the following, #5 in the same list of issues:
-# TEST 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
-# TEST set, duplicate comparison routine specified.
-# TEST The partial change does not change how data items sort, but the
-# TEST record to be put isn't built yet, and the record supplied is the
-# TEST one checked for ordering compatibility.
-proc test067 { method {ndups 1000} {tnum "067"} args } {
- source ./include.tcl
- global alphabet
- global errorCode
- global is_je_test
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- set txn ""
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
-
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- if { $ndups == 1000 } {
- set ndups 100
- }
- }
- set testdir [get_home $env]
- }
-
- cleanup $testdir $env
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "\tTest$tnum: skipping for method $method."
- return
- }
-
- puts "Test$tnum:\
- $method ($args) Partial puts on near-empty duplicate pages."
-
- foreach dupopt { "-dup" "-dup -dupsort" } {
- if { $is_je_test && $dupopt == "-dup" } {
- continue
- }
-
- #
- # Testdir might get reset from the env's home dir back
- # to the default if this calls something that sources
- # include.tcl, since testdir is a global. Set it correctly
- # here each time through the loop.
- #
- if { $env != "NULL" } {
- set testdir [get_home $env]
- }
- cleanup $testdir $env
- set db [eval {berkdb_open -create -mode 0644 \
- $omethod} $args $dupopt {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- puts "\tTest$tnum.a ($dupopt): Put $ndups duplicates."
-
- set key "key_test$tnum"
-
- for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
- set data $alphabet$ndx
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- # No need for pad_data since we're skipping recno.
- set ret [eval {$db put} $txn {$key $data}]
- error_check_good put($key,$data) $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # Sync so we can inspect database if the next section bombs.
- error_check_good db_sync [$db sync] 0
- puts "\tTest$tnum.b ($dupopt):\
- Deleting dups (last first), overwriting each."
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
-
- set count 0
- while { $count < $ndups - 1 } {
- # set cursor to last item in db
- set ret [$dbc get -last]
- error_check_good \
- verify_key [lindex [lindex $ret 0] 0] $key
-
- # for error reporting
- set currdatum [lindex [lindex $ret 0] 1]
-
- # partial-overwrite it
- # (overwrite offsets 1-4 with "bcde"--which they
- # already are)
-
- # Even though we expect success, we catch this
- # since it might return EINVAL, and we want that
- # to FAIL.
- set errorCode NONE
- set ret [catch {eval $dbc put -current \
- {-partial [list 1 4]} "bcde"} \
- res]
- error_check_good \
- partial_put_valid($currdatum) $errorCode NONE
- error_check_good partial_put($currdatum) $res 0
-
- # delete it
- error_check_good dbc_del [$dbc del] 0
-
- #puts $currdatum
-
- incr count
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
- }
-}
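For reference, the -partial {doff dlen} form used above replaces dlen bytes of the record, starting at byte doff, with whatever data is supplied. A small sketch under the same berkdb-extension assumption; partial.db and the values are arbitrary.

    # Sketch: replace bytes 1-4 of "abcdef" with "BCDE".
    set db [berkdb_open -create -mode 0644 -btree partial.db]
    $db put mykey abcdef
    set dbc [$db cursor]
    $dbc get -set mykey
    $dbc put -current -partial {1 4} BCDE
    puts [$db get mykey]             ;# expected: {{mykey aBCDEf}}
    $dbc close
    $db close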
diff --git a/storage/bdb/test/test068.tcl b/storage/bdb/test/test068.tcl
deleted file mode 100644
index a4fb56da091..00000000000
--- a/storage/bdb/test/test068.tcl
+++ /dev/null
@@ -1,231 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test068.tcl,v 11.21 2004/05/13 18:51:43 mjc Exp $
-#
-# TEST test068
-# TEST Test of DB_BEFORE and DB_AFTER with partial puts.
-# TEST Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
-# TEST check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
-proc test068 { method args } {
- source ./include.tcl
- global alphabet
- global errorCode
- global is_je_test
-
- set tnum "068"
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- set nkeys 1000
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- set nkeys 100
- }
- set testdir [get_home $env]
- }
-
- puts "Test$tnum:\
- $method ($args) Test of DB_BEFORE/DB_AFTER and partial puts."
- if { [is_record_based $method] == 1 } {
- puts "\tTest$tnum: skipping for method $method."
- return
- }
-
- # Create a list of $nkeys words to insert into db.
- puts "\tTest$tnum.a: Initialize word list."
- set txn ""
- set wordlist {}
- set count 0
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nkeys } {
- lappend wordlist $str
- incr count
- }
- close $did
-
- # Sanity check: did we get $nkeys words?
- error_check_good enough_keys [llength $wordlist] $nkeys
-
- # rbtree can't handle dups, so just test the non-dup case
- # if it's the current method.
- if { [is_rbtree $method] == 1 } {
- set dupoptlist { "" }
- } else {
- set dupoptlist { "" "-dup" "-dup -dupsort" }
- }
-
- foreach dupopt $dupoptlist {
- if { $is_je_test && $dupopt == "-dup" } {
- continue
- }
-
- #
- # Testdir might be reset in the loop by some proc sourcing
- # include.tcl. Reset it to the env's home here, before
- # cleanup.
- if { $env != "NULL" } {
- set testdir [get_home $env]
- }
- cleanup $testdir $env
- set db [eval {berkdb_open_noerr -create -mode 0644 \
- $omethod} $args $dupopt {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- puts "\tTest$tnum.b ($dupopt): DB initialization: put loop."
- foreach word $wordlist {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$word $word}]
- error_check_good db_put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- puts "\tTest$tnum.c ($dupopt): get loop."
- foreach word $wordlist {
- # Make sure that the Nth word has been correctly
- # inserted, and also that the Nth word is the
- # Nth one we pull out of the database using a cursor.
-
- set dbt [$db get $word]
- error_check_good get_key $dbt [list [list $word $word]]
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
-
- puts "\tTest$tnum.d ($dupopt): DBC->put w/ DB_AFTER."
-
- # Set cursor to the first key; make sure it succeeds.
- # With an unsorted wordlist, we can't be sure that the
- # first item returned will equal the first item in the
- # wordlist, so we just make sure it got something back.
- set dbt [eval {$dbc get -first}]
- error_check_good \
- dbc_get_first [llength $dbt] 1
-
- # If -dup is not set, or if -dupsort is set too, we
- # need to verify that DB_BEFORE and DB_AFTER fail
- # and then move on to the next $dupopt.
- if { $dupopt != "-dup" } {
- set errorCode "NONE"
- set ret [catch {eval $dbc put -after \
- {-partial [list 6 0]} "after"} res]
- error_check_good dbc_put_after_fail $ret 1
- error_check_good dbc_put_after_einval \
- [is_substr $errorCode EINVAL] 1
- puts "\tTest$tnum ($dupopt): DB_AFTER returns EINVAL."
- set errorCode "NONE"
- set ret [catch {eval $dbc put -before \
- {-partial [list 6 0]} "before"} res]
- error_check_good dbc_put_before_fail $ret 1
- error_check_good dbc_put_before_einval \
- [is_substr $errorCode EINVAL] 1
- puts "\tTest$tnum ($dupopt): DB_BEFORE returns EINVAL."
- puts "\tTest$tnum ($dupopt): Correct error returns,\
- skipping further test."
- # continue with broad foreach
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
- continue
- }
-
- puts "\tTest$tnum.e ($dupopt): DBC->put(DB_AFTER) loop."
- foreach word $wordlist {
- # set cursor to $word
- set dbt [$dbc get -set $word]
- error_check_good \
- dbc_get_set $dbt [list [list $word $word]]
- # put after it
- set ret [$dbc put -after -partial {4 0} after]
- error_check_good dbc_put_after $ret 0
- }
-
- puts "\tTest$tnum.f ($dupopt): DBC->put(DB_BEFORE) loop."
- foreach word $wordlist {
- # set cursor to $word
- set dbt [$dbc get -set $word]
- error_check_good \
- dbc_get_set $dbt [list [list $word $word]]
- # put before it
- set ret [$dbc put -before -partial {6 0} before]
- error_check_good dbc_put_before $ret 0
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- eval $db sync
- puts "\tTest$tnum.g ($dupopt): Verify correctness."
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- # loop through the whole db beginning to end,
- # make sure we have, in order, {$word "\0\0\0\0\0\0before"},
- # {$word $word}, {$word "\0\0\0\0after"} for each word.
- set count 0
- while { $count < $nkeys } {
- # Get the first item of each set of three.
- # We don't know what the word is, but set $word to
- # the key and check that the data is
- # "\0\0\0\0\0\0before".
- set dbt [$dbc get -next]
- set word [lindex [lindex $dbt 0] 0]
-
- error_check_good dbc_get_one $dbt \
- [list [list $word "\0\0\0\0\0\0before"]]
-
- set dbt [$dbc get -next]
- error_check_good \
- dbc_get_two $dbt [list [list $word $word]]
-
- set dbt [$dbc get -next]
- error_check_good dbc_get_three $dbt \
- [list [list $word "\0\0\0\0after"]]
-
- incr count
- }
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
- }
-}
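The expected data items "\0\0\0\0\0\0before" and "\0\0\0\0after" follow from how the test expects partial puts to build a brand-new record: it starts empty, and a write at offset doff zero-fills everything before it. A sketch of the DB_BEFORE case, assuming the berkdb extension; pad.db is arbitrary.

    # Sketch: a new duplicate written with -partial {6 0} consists of
    # six NUL bytes followed by the supplied data.
    set db [berkdb_open -create -mode 0644 -dup -btree pad.db]
    $db put word word
    set dbc [$db cursor]
    $dbc get -set word
    $dbc put -before -partial {6 0} before
    set datum [lindex [lindex [$dbc get -current] 0] 1]
    puts [string equal $datum "\0\0\0\0\0\0before"]    ;# expected: 1
    $dbc close
    $db close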
diff --git a/storage/bdb/test/test069.tcl b/storage/bdb/test/test069.tcl
deleted file mode 100644
index 46104ffa28b..00000000000
--- a/storage/bdb/test/test069.tcl
+++ /dev/null
@@ -1,14 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test069.tcl,v 11.10 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test069
-# TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/
-# TEST small ndups to ensure that partial puts to DB_CURRENT work
-# TEST correctly in the absence of duplicate pages.
-proc test069 { method {ndups 50} {tnum "069"} args } {
- eval test067 $method $ndups $tnum $args
-}
diff --git a/storage/bdb/test/test070.tcl b/storage/bdb/test/test070.tcl
deleted file mode 100644
index 9d124a77b64..00000000000
--- a/storage/bdb/test/test070.tcl
+++ /dev/null
@@ -1,136 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test070.tcl,v 11.33 2004/02/17 16:29:07 dda Exp $
-#
-# TEST test070
-# TEST Test of DB_CONSUME (Four consumers, 1000 items.)
-# TEST
-# TEST Fork off six processes, four consumers and two producers.
-# TEST By default the two producers will each put 500 of the 1000 records
-# TEST into a queue; the four consumers will each get 250.
-# TEST Then, verify that no record was lost or retrieved twice.
-proc test070 { method {nconsumers 4} {nproducers 2} \
- {nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum "070"} args } {
- source ./include.tcl
- global alphabet
- global encrypt
-
- #
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test$tnum skipping for env $env"
- return
- }
- set omethod [convert_method $method]
- set args [convert_args $method $args]
- if { $encrypt != 0 } {
- puts "Test$tnum skipping for security"
- return
- }
-
- puts "Test$tnum: $method ($args) Test of DB_$mode flag to DB->get."
- puts "\tUsing $txn environment."
-
- error_check_good enough_consumers [expr $nconsumers > 0] 1
- error_check_good enough_producers [expr $nproducers > 0] 1
-
- if { [is_queue $method] != 1 } {
- puts "\tSkipping Test$tnum for method $method."
- return
- }
-
- env_cleanup $testdir
- set testfile test$tnum.db
-
- # Create environment
- set dbenv [eval {berkdb_env -create $txn -home } $testdir]
- error_check_good dbenv_create [is_valid_env $dbenv] TRUE
-
- # Create database
- set db [eval {berkdb_open -create -mode 0644 -queue}\
- -env $dbenv $args $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- if { $start != 0 } {
- error_check_good set_seed [$db put $start "consumer data"] 0
- puts "\tTest$tnum: starting at $start."
- } else {
- incr start
- }
-
- set pidlist {}
-
- # Divvy up the total number of records amongst the consumers and
- # producers.
- error_check_good cons_div_evenly [expr $nitems % $nconsumers] 0
- error_check_good prod_div_evenly [expr $nitems % $nproducers] 0
- set nperconsumer [expr $nitems / $nconsumers]
- set nperproducer [expr $nitems / $nproducers]
-
- set consumerlog $testdir/CONSUMERLOG.
-
- # Fork consumer processes (we want them to be hungry)
- for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
- set output $consumerlog$ndx
- set p [exec $tclsh_path $test_path/wrap.tcl \
- conscript.tcl $testdir/conscript.log.consumer$ndx \
- $testdir $testfile $mode $nperconsumer $output $tnum \
- $args &]
- lappend pidlist $p
- }
- for { set ndx 0 } { $ndx < $nproducers } { incr ndx } {
- set p [exec $tclsh_path $test_path/wrap.tcl \
- conscript.tcl $testdir/conscript.log.producer$ndx \
- $testdir $testfile PRODUCE $nperproducer "" $tnum \
- $args &]
- lappend pidlist $p
- }
-
- # Wait for all children.
- watch_procs $pidlist 10
-
- # Verify: slurp all record numbers into list, sort, and make
- # sure each appears exactly once.
- puts "\tTest$tnum: Verifying results."
- set reclist {}
- for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
- set input $consumerlog$ndx
- set iid [open $input r]
- while { [gets $iid str] != -1 } {
- lappend reclist $str
- }
- close $iid
- }
- set sortreclist [lsort -command int32_compare $reclist]
-
- set nitems [expr $start + $nitems]
- for { set ndx $start } { $ndx < $nitems } { set ndx [expr $ndx + 1] } {
- # Wrap if $ndx goes beyond 32 bits because our
- # recno wrapped if it did.
- if { $ndx > 0xffffffff } {
- set cmp [expr $ndx - 0xffffffff]
- } else {
- set cmp [expr $ndx + 0]
- }
- # Skip 0 if we are wrapping around
- if { $cmp == 0 } {
- incr ndx
- incr nitems
- incr cmp
- }
- # Be sure to convert ndx to a number before comparing.
- error_check_good pop_num [lindex $sortreclist 0] $cmp
- set sortreclist [lreplace $sortreclist 0 0]
- }
- error_check_good list_ends_empty $sortreclist {}
- error_check_good db_close [$db close] 0
- error_check_good dbenv_close [$dbenv close] 0
-
- puts "\tTest$tnum completed successfully."
-}
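conscript.tcl, which is not part of this hunk, implements the forked producers and consumers; stripped of process management they reduce to roughly the loops below. This is a sketch only, assuming the berkdb extension, that $db put -append returns the allocated record number, and that $db get -consume atomically returns and removes the queue head; jobs.db is arbitrary. Queue records are fixed length, so consumed data comes back padded to -len.

    set db [berkdb_open -create -mode 0644 -len 32 -queue jobs.db]

    # Producer side: -append allocates the next record number.
    for { set i 0 } { $i < 5 } { incr i } {
        set recno [$db put -append "job $i"]
    }

    # Consumer side: an empty result means the queue is drained.
    while { [llength [set ret [$db get -consume]]] > 0 } {
        puts "consumed: [lindex $ret 0]"
    }
    $db close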
diff --git a/storage/bdb/test/test071.tcl b/storage/bdb/test/test071.tcl
deleted file mode 100644
index 7834c82ad4d..00000000000
--- a/storage/bdb/test/test071.tcl
+++ /dev/null
@@ -1,16 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test071.tcl,v 11.14 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test071
-# TEST Test of DB_CONSUME (One consumer, 10000 items.)
-# TEST This is DB Test 70, with one consumer, one producer, and 10000 items.
-proc test071 { method {nconsumers 1} {nproducers 1} {nitems 10000} \
- {mode CONSUME} {start 0 } {txn -txn} {tnum "071"} args } {
-
- eval test070 $method \
- $nconsumers $nproducers $nitems $mode $start $txn $tnum $args
-}
diff --git a/storage/bdb/test/test072.tcl b/storage/bdb/test/test072.tcl
deleted file mode 100644
index 3a8ff7aa28e..00000000000
--- a/storage/bdb/test/test072.tcl
+++ /dev/null
@@ -1,257 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test072.tcl,v 11.34 2004/05/13 18:51:44 mjc Exp $
-#
-# TEST test072
-# TEST Test of cursor stability when duplicates are moved off-page.
-proc test072 { method {pagesize 512} {ndups 20} {tnum "072"} args } {
- source ./include.tcl
- global alphabet
- global is_je_test
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile name should just be
- # the db name. Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set basename $testdir/test$tnum
- set env NULL
- } else {
- set basename test$tnum
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- # Keys must sort $prekey < $key < $postkey.
- set prekey "a key"
- set key "the key"
- set postkey "z key"
-
- # Make these distinguishable from each other and from the
- # alphabets used for the $key's data.
- set predatum "1234567890"
- set postdatum "0987654321"
-
- puts -nonewline "Test$tnum $omethod ($args): "
- if { [is_record_based $method] || [is_rbtree $method] } {
- puts "Skipping for method $method."
- return
- } else {
- puts "\nTest$tnum: Test of cursor stability when\
- duplicates are moved off-page."
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test$tnum: skipping for specific pagesizes"
- return
- }
-
- append args " -pagesize $pagesize "
- set txn ""
-
- set dlist [list "-dup" "-dup -dupsort"]
- set testid 0
- foreach dupopt $dlist {
- if { $is_je_test && $dupopt == "-dup" } {
- continue
- }
-
- incr testid
- set duptestfile $basename$testid.db
- set db [eval {berkdb_open -create -mode 0644} \
- $omethod $args $dupopt {$duptestfile}]
- error_check_good "db open" [is_valid_db $db] TRUE
-
- puts \
-"\tTest$tnum.a: ($dupopt) Set up surrounding keys and cursors."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$prekey $predatum}]
- error_check_good pre_put $ret 0
- set ret [eval {$db put} $txn {$postkey $postdatum}]
- error_check_good post_put $ret 0
-
- set precursor [eval {$db cursor} $txn]
- error_check_good precursor [is_valid_cursor $precursor \
- $db] TRUE
- set postcursor [eval {$db cursor} $txn]
- error_check_good postcursor [is_valid_cursor $postcursor \
- $db] TRUE
- error_check_good preset [$precursor get -set $prekey] \
- [list [list $prekey $predatum]]
- error_check_good postset [$postcursor get -set $postkey] \
- [list [list $postkey $postdatum]]
-
- puts "\tTest$tnum.b: Put/create cursor/verify all cursor loop."
-
- for { set i 0 } { $i < $ndups } { incr i } {
- set datum [format "%4d$alphabet" [expr $i + 1000]]
- set data($i) $datum
-
- # Uncomment these lines to see intermediate steps.
- # error_check_good db_sync($i) [$db sync] 0
- # error_check_good db_dump($i) \
- # [catch {exec $util_path/db_dump \
- # -da $duptestfile > $testdir/out.$i}] 0
-
- set ret [eval {$db put} $txn {$key $datum}]
- error_check_good "db put ($i)" $ret 0
-
- set dbc($i) [eval {$db cursor} $txn]
- error_check_good "db cursor ($i)"\
- [is_valid_cursor $dbc($i) $db] TRUE
-
- error_check_good "dbc get -get_both ($i)"\
- [$dbc($i) get -get_both $key $datum]\
- [list [list $key $datum]]
-
- for { set j 0 } { $j < $i } { incr j } {
- set dbt [$dbc($j) get -current]
- set k [lindex [lindex $dbt 0] 0]
- set d [lindex [lindex $dbt 0] 1]
-
- #puts "cursor $j after $i: $d"
-
- eval {$db sync}
-
- error_check_good\
- "cursor $j key correctness after $i puts" \
- $k $key
- error_check_good\
- "cursor $j data correctness after $i puts" \
- $d $data($j)
- }
-
- # Check correctness of pre- and post- cursors. Do an
- # error_check_good on the lengths first so that we don't
- # spew garbage as the "got" field and screw up our
- # terminal. (It's happened here.)
- set pre_dbt [$precursor get -current]
- set post_dbt [$postcursor get -current]
- error_check_good \
- "key earlier cursor correctness after $i puts" \
- [string length [lindex [lindex $pre_dbt 0] 0]] \
- [string length $prekey]
- error_check_good \
- "data earlier cursor correctness after $i puts" \
- [string length [lindex [lindex $pre_dbt 0] 1]] \
- [string length $predatum]
- error_check_good \
- "key later cursor correctness after $i puts" \
- [string length [lindex [lindex $post_dbt 0] 0]] \
- [string length $postkey]
- error_check_good \
- "data later cursor correctness after $i puts" \
- [string length [lindex [lindex $post_dbt 0] 1]]\
- [string length $postdatum]
-
- error_check_good \
- "earlier cursor correctness after $i puts" \
- $pre_dbt [list [list $prekey $predatum]]
- error_check_good \
- "later cursor correctness after $i puts" \
- $post_dbt [list [list $postkey $postdatum]]
- }
-
- puts "\tTest$tnum.c: Reverse Put/create cursor/verify all cursor loop."
- set end [expr $ndups * 2 - 1]
- for { set i $end } { $i >= $ndups } { set i [expr $i - 1] } {
- set datum [format "%4d$alphabet" [expr $i + 1000]]
- set data($i) $datum
-
- # Uncomment these lines to see intermediate steps.
- # error_check_good db_sync($i) [$db sync] 0
- # error_check_good db_dump($i) \
- # [catch {exec $util_path/db_dump \
- # -da $duptestfile > $testdir/out.$i}] 0
-
- set ret [eval {$db put} $txn {$key $datum}]
- error_check_good "db put ($i)" $ret 0
-
- error_check_bad dbc($i)_stomped [info exists dbc($i)] 1
- set dbc($i) [eval {$db cursor} $txn]
- error_check_good "db cursor ($i)"\
- [is_valid_cursor $dbc($i) $db] TRUE
-
- error_check_good "dbc get -get_both ($i)"\
- [$dbc($i) get -get_both $key $datum]\
- [list [list $key $datum]]
-
- for { set j $i } { $j < $end } { incr j } {
- set dbt [$dbc($j) get -current]
- set k [lindex [lindex $dbt 0] 0]
- set d [lindex [lindex $dbt 0] 1]
-
- #puts "cursor $j after $i: $d"
-
- eval {$db sync}
-
- error_check_good\
- "cursor $j key correctness after $i puts" \
- $k $key
- error_check_good\
- "cursor $j data correctness after $i puts" \
- $d $data($j)
- }
-
- # Check correctness of pre- and post- cursors. Do an
- # error_check_good on the lengths first so that we don't
- # spew garbage as the "got" field and screw up our
- # terminal. (It's happened here.)
- set pre_dbt [$precursor get -current]
- set post_dbt [$postcursor get -current]
- error_check_good \
- "key earlier cursor correctness after $i puts" \
- [string length [lindex [lindex $pre_dbt 0] 0]] \
- [string length $prekey]
- error_check_good \
- "data earlier cursor correctness after $i puts" \
- [string length [lindex [lindex $pre_dbt 0] 1]] \
- [string length $predatum]
- error_check_good \
- "key later cursor correctness after $i puts" \
- [string length [lindex [lindex $post_dbt 0] 0]] \
- [string length $postkey]
- error_check_good \
- "data later cursor correctness after $i puts" \
- [string length [lindex [lindex $post_dbt 0] 1]]\
- [string length $postdatum]
-
- error_check_good \
- "earlier cursor correctness after $i puts" \
- $pre_dbt [list [list $prekey $predatum]]
- error_check_good \
- "later cursor correctness after $i puts" \
- $post_dbt [list [list $postkey $postdatum]]
- }
-
- # Close cursors.
- puts "\tTest$tnum.d: Closing cursors."
- for { set i 0 } { $i <= $end } { incr i } {
- error_check_good "dbc close ($i)" [$dbc($i) close] 0
- }
- unset dbc
- error_check_good precursor_close [$precursor close] 0
- error_check_good postcursor_close [$postcursor close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good "db close" [$db close] 0
- }
-}
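The invariant under test, that a held cursor still returns its item after later puts push the duplicate set off-page, can be stated compactly. A sketch, assuming the berkdb extension; the small -pagesize forces off-page duplicates quickly, and stab.db is arbitrary.

    set db [berkdb_open -create -mode 0644 -pagesize 512 -dup -btree stab.db]
    $db put "the key" "datum-0"
    set dbc [$db cursor]
    $dbc get -first                    ;# pin the cursor on datum-0
    for { set i 1 } { $i < 100 } { incr i } {
        $db put "the key" "datum-$i"   ;# grow the dup set off-page
    }
    # The pinned cursor should be unaffected by the page splits.
    puts [$dbc get -current]           ;# expected: {{the key} datum-0}
    $dbc close
    $db close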
diff --git a/storage/bdb/test/test073.tcl b/storage/bdb/test/test073.tcl
deleted file mode 100644
index bac753ea6c5..00000000000
--- a/storage/bdb/test/test073.tcl
+++ /dev/null
@@ -1,290 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test073.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test073
-# TEST Test of cursor stability on duplicate pages.
-# TEST
-# TEST Does the following:
-# TEST a. Initialize things by DB->putting ndups dups and
-# TEST setting a reference cursor to point to each.
-# TEST b. c_put ndups dups (and correspondingly expanding
-# TEST the set of reference cursors) after the last one, making sure
-# TEST after each step that all the reference cursors still point to
-# TEST the right item.
-# TEST c. Ditto, but before the first one.
-# TEST d. Ditto, but after each one in sequence first to last.
-# TEST e. Ditto, but after each one in sequence from last to first
-# TEST (checking that the new puts occur relative to the new datum).
-# TEST f. Ditto for the two sequence tests, only doing a
-# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
-# TEST new one.
-proc test073 { method {pagesize 512} {ndups 50} {tnum "073"} args } {
- source ./include.tcl
- global alphabet
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set key "the key"
- set txn ""
-
- puts -nonewline "Test$tnum $omethod ($args): "
- if { [is_record_based $method] || [is_rbtree $method] } {
- puts "Skipping for method $method."
- return
- } else {
- puts "cursor stability on duplicate pages."
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test073: skipping for specific pagesizes"
- return
- }
-
- append args " -pagesize $pagesize -dup"
-
- set db [eval {berkdb_open \
- -create -mode 0644} $omethod $args $testfile]
- error_check_good "db open" [is_valid_db $db] TRUE
-
- # Number of outstanding keys.
- set keys 0
-
- puts "\tTest$tnum.a.1: Initializing put loop; $ndups dups, short data."
-
- for { set i 0 } { $i < $ndups } { incr i } {
- set datum [makedatum_t73 $i 0]
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key $datum}]
- error_check_good "db put ($i)" $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set is_long($i) 0
- incr keys
- }
-
- puts "\tTest$tnum.a.2: Initializing cursor get loop; $keys dups."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- for { set i 0 } { $i < $keys } { incr i } {
- set datum [makedatum_t73 $i 0]
-
- set dbc($i) [eval {$db cursor} $txn]
- error_check_good "db cursor ($i)"\
- [is_valid_cursor $dbc($i) $db] TRUE
- error_check_good "dbc get -get_both ($i)"\
- [$dbc($i) get -get_both $key $datum]\
- [list [list $key $datum]]
- }
-
- puts "\tTest$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
- short data."
-
- for { set i 0 } { $i < $ndups } { incr i } {
- # !!! keys contains the number of the next dup
- # to be added (since they start from zero)
-
- set datum [makedatum_t73 $keys 0]
- set curs [eval {$db cursor} $txn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
- error_check_good "c_put(DB_KEYLAST, $keys)"\
- [$curs put -keylast $key $datum] 0
-
- set dbc($keys) $curs
- set is_long($keys) 0
- incr keys
-
- verify_t73 is_long dbc $keys $key
- }
-
- puts "\tTest$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
- short data."
-
- for { set i 0 } { $i < $ndups } { incr i } {
- # !!! keys contains the number of the next dup
- # to be added (since they start from zero)
-
- set datum [makedatum_t73 $keys 0]
- set curs [eval {$db cursor} $txn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
- error_check_good "c_put(DB_KEYFIRST, $keys)"\
- [$curs put -keyfirst $key $datum] 0
-
- set dbc($keys) $curs
- set is_long($keys) 0
- incr keys
-
- verify_t73 is_long dbc $keys $key
- }
-
- puts "\tTest$tnum.d: Cursor put (DB_AFTER) first to last;\
- $keys new dups, short data"
- # We want to add a datum after each key from 0 to the current
- # value of $keys, which we thus need to save.
- set keysnow $keys
- for { set i 0 } { $i < $keysnow } { incr i } {
- set datum [makedatum_t73 $keys 0]
- set curs [eval {$db cursor} $txn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
-
- # Which datum to insert this guy after.
- set curdatum [makedatum_t73 $i 0]
- error_check_good "c_get(DB_GET_BOTH, $i)"\
- [$curs get -get_both $key $curdatum]\
- [list [list $key $curdatum]]
- error_check_good "c_put(DB_AFTER, $i)"\
- [$curs put -after $datum] 0
-
- set dbc($keys) $curs
- set is_long($keys) 0
- incr keys
-
- verify_t73 is_long dbc $keys $key
- }
-
- puts "\tTest$tnum.e: Cursor put (DB_BEFORE) last to first;\
- $keys new dups, short data"
-
- for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
- set datum [makedatum_t73 $keys 0]
- set curs [eval {$db cursor} $txn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
-
- # Which datum to insert this guy before.
- set curdatum [makedatum_t73 $i 0]
- error_check_good "c_get(DB_GET_BOTH, $i)"\
- [$curs get -get_both $key $curdatum]\
- [list [list $key $curdatum]]
- error_check_good "c_put(DB_BEFORE, $i)"\
- [$curs put -before $datum] 0
-
- set dbc($keys) $curs
- set is_long($keys) 0
- incr keys
-
- if { $i % 10 == 1 } {
- verify_t73 is_long dbc $keys $key
- }
- }
- verify_t73 is_long dbc $keys $key
-
- puts "\tTest$tnum.f: Cursor put (DB_CURRENT), first to last,\
- growing $keys data."
- set keysnow $keys
- for { set i 0 } { $i < $keysnow } { incr i } {
- set olddatum [makedatum_t73 $i 0]
- set newdatum [makedatum_t73 $i 1]
- set curs [eval {$db cursor} $txn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
-
- error_check_good "c_get(DB_GET_BOTH, $i)"\
- [$curs get -get_both $key $olddatum]\
- [list [list $key $olddatum]]
- error_check_good "c_put(DB_CURRENT, $i)"\
- [$curs put -current $newdatum] 0
-
- error_check_good "cursor close" [$curs close] 0
-
- set is_long($i) 1
-
- if { $i % 10 == 1 } {
- verify_t73 is_long dbc $keys $key
- }
- }
- verify_t73 is_long dbc $keys $key
-
- # Close cursors.
- puts "\tTest$tnum.g: Closing cursors."
- for { set i 0 } { $i < $keys } { incr i } {
- error_check_good "dbc close ($i)" [$dbc($i) close] 0
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good "db close" [$db close] 0
-}
-
-# !!!: This procedure is also used by test087.
-proc makedatum_t73 { num is_long } {
- global alphabet
- if { $is_long == 1 } {
- set a $alphabet$alphabet$alphabet
- } else {
- set a abcdefghijklm
- }
-
- # format won't do leading zeros, alas.
- if { $num / 1000 > 0 } {
- set i $num
- } elseif { $num / 100 > 0 } {
- set i 0$num
- } elseif { $num / 10 > 0 } {
- set i 00$num
- } else {
- set i 000$num
- }
-
- return $i$a
-}
-
-# !!!: This procedure is also used by test087.
-proc verify_t73 { is_long_array curs_array numkeys key } {
- upvar $is_long_array is_long
- upvar $curs_array dbc
- upvar db db
-
- #useful for debugging, perhaps.
- eval $db sync
-
- for { set j 0 } { $j < $numkeys } { incr j } {
- set dbt [$dbc($j) get -current]
- set k [lindex [lindex $dbt 0] 0]
- set d [lindex [lindex $dbt 0] 1]
-
- error_check_good\
- "cursor $j key correctness (with $numkeys total items)"\
- $k $key
- error_check_good\
- "cursor $j data correctness (with $numkeys total items)"\
- $d [makedatum_t73 $j $is_long($j)]
- }
-}
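One aside on makedatum_t73: despite its comment, Tcl's format does appear to zero-pad when given the 0 flag (format %04d 7 gives 0007), so the division ladder could likely be replaced. A hypothetical equivalent, kept separate so the original above stays verbatim; makedatum_t73_fmt is an invented name.

    # Sketch: same output as makedatum_t73 for num <= 9999.
    proc makedatum_t73_fmt { num is_long } {
        global alphabet
        if { $is_long == 1 } {
            set a $alphabet$alphabet$alphabet
        } else {
            set a abcdefghijklm
        }
        return [format %04d $num]$a
    }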
diff --git a/storage/bdb/test/test074.tcl b/storage/bdb/test/test074.tcl
deleted file mode 100644
index 8302a230221..00000000000
--- a/storage/bdb/test/test074.tcl
+++ /dev/null
@@ -1,275 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test074.tcl,v 11.22 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test074
-# TEST Test of DB_NEXT_NODUP.
-proc test074 { method {dir -nextnodup} {nitems 100} {tnum "074"} args } {
- source ./include.tcl
- global alphabet
- global is_je_test
- global rand_init
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- berkdb srand $rand_init
-
- # Data prefix--big enough that we get a mix of on-page, off-page,
- # and multi-off-page dups with the default nitems
- if { [is_fixed_length $method] == 1 } {
- set globaldata "somedata"
- } else {
- set globaldata [repeat $alphabet 4]
- }
-
- puts "Test$tnum $omethod ($args): Test of $dir"
-
- # First, test non-dup (and not-very-interesting) case with
- # all db types.
-
- puts "\tTest$tnum.a: No duplicates."
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum-nodup.db
- set env NULL
- } else {
- set testfile test$tnum-nodup.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
- set db [eval {berkdb_open -create -mode 0644} $omethod\
- $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
- set txn ""
-
- # Insert nitems items.
- puts "\t\tTest$tnum.a.1: Put loop."
- for {set i 1} {$i <= $nitems} {incr i} {
- #
- # If record based, set key to $i * 2 to leave
- # holes/unused entries for further testing.
- #
- if {[is_record_based $method] == 1} {
- set key [expr $i * 2]
- } else {
- set key "key$i"
- }
- set data "$globaldata$i"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key \
- [chop_data $method $data]}]
- error_check_good put($i) $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- puts "\t\tTest$tnum.a.2: Get($dir)"
-
- # foundarray($i) is set when key number i is found in the database
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- # Initialize foundarray($i) to zero for all $i
- for {set i 1} {$i < $nitems} {incr i} {
- set foundarray($i) 0
- }
-
- # Walk database using $dir and record each key gotten.
- for {set i 1} {$i <= $nitems} {incr i} {
- set dbt [$dbc get $dir]
- set key [lindex [lindex $dbt 0] 0]
- if {[is_record_based $method] == 1} {
- set num [expr $key / 2]
- set desired_key $key
- error_check_good $method:num $key [expr $num * 2]
- } else {
- set num [string range $key 3 end]
- set desired_key key$num
- }
-
- error_check_good dbt_correct($i) $dbt\
- [list [list $desired_key\
- [pad_data $method $globaldata$num]]]
-
- set foundarray($num) 1
- }
-
- puts "\t\tTest$tnum.a.3: Final key."
- error_check_good last_db_get [$dbc get $dir] [list]
-
- puts "\t\tTest$tnum.a.4: Verify loop."
- for { set i 1 } { $i <= $nitems } { incr i } {
- error_check_good found_key($i) $foundarray($i) 1
- }
-
- error_check_good dbc_close(nodup) [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- # If we are a method that doesn't allow dups, verify that
- # we get an empty list if we try to use DB_NEXT_DUP
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- puts "\t\tTest$tnum.a.5: Check DB_NEXT_DUP for $method."
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- set dbt [$dbc get $dir]
- error_check_good $method:nextdup [$dbc get -nextdup] [list]
- error_check_good dbc_close(nextdup) [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- error_check_good db_close(nodup) [$db close] 0
-
- # Quit here if we're a method that won't allow dups.
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "\tTest$tnum: Skipping remainder for method $method."
- return
- }
-
- foreach opt { "-dup" "-dupsort" } {
- if { $is_je_test && $opt == "-dup" } {
- continue
- }
-
- #
- # If we are using an env, then testfile should just be the
- # db name. Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum$opt.db
- } else {
- set testfile test$tnum$opt.db
- }
-
- if { [string compare $opt "-dupsort"] == 0 } {
- set opt "-dup -dupsort"
- }
-
- puts "\tTest$tnum.b: Duplicates ($opt)."
-
- puts "\t\tTest$tnum.b.1 ($opt): Put loop."
- set db [eval {berkdb_open -create -mode 0644}\
- $opt $omethod $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- # Insert nitems different keys such that key i has i dups.
- for {set i 1} {$i <= $nitems} {incr i} {
- set key key$i
-
- for {set j 1} {$j <= $i} {incr j} {
- if { $j < 10 } {
- set data "${globaldata}00$j"
- } elseif { $j < 100 } {
- set data "${globaldata}0$j"
- } else {
- set data "$globaldata$j"
- }
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn \
- [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key $data}]
- error_check_good put($i,$j) $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- }
-
- # Initialize foundarray($i) to 0 for all i.
- unset foundarray
- for { set i 1 } { $i <= $nitems } { incr i } {
- set foundarray($i) 0
- }
-
- # Get loop--after each get, move forward a random increment
- # within the duplicate set.
- puts "\t\tTest$tnum.b.2 ($opt): Get loop."
- set one "001"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE
- for { set i 1 } { $i <= $nitems } { incr i } {
- set dbt [$dbc get $dir]
- set key [lindex [lindex $dbt 0] 0]
- set num [string range $key 3 end]
-
- set desired_key key$num
- if { [string compare $dir "-prevnodup"] == 0 } {
- if { $num < 10 } {
- set one "00$num"
- } elseif { $num < 100 } {
- set one "0$num"
- } else {
- set one $num
- }
- }
-
- error_check_good dbt_correct($i) $dbt\
- [list [list $desired_key\
- "$globaldata$one"]]
-
- set foundarray($num) 1
-
- # Go forward by some number w/i dup set.
- set inc [berkdb random_int 0 [expr $num - 1]]
- for { set j 0 } { $j < $inc } { incr j } {
- eval {$dbc get -nextdup}
- }
- }
-
- puts "\t\tTest$tnum.b.3 ($opt): Final key."
- error_check_good last_db_get($opt) [$dbc get $dir] [list]
-
- # Verify
- puts "\t\tTest$tnum.b.4 ($opt): Verify loop."
- for { set i 1 } { $i <= $nitems } { incr i } {
- error_check_good found_key($i) $foundarray($i) 1
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
- }
-}
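A distilled sketch of the -nextnodup traversal the test verifies: it skips any remaining duplicates of the current key and lands on the next distinct key. Assumes the berkdb extension; nodup.db and the keys are arbitrary.

    set db [berkdb_open -create -mode 0644 -dup -btree nodup.db]
    foreach {k d} {a 1 a 2 a 3 b 1 b 2 c 1} {
        $db put $k $d
    }
    set dbc [$db cursor]
    for { set ret [$dbc get -first] } { [llength $ret] > 0 } \
        { set ret [$dbc get -nextnodup] } {
        puts "key: [lindex [lindex $ret 0] 0]"    ;# prints a, b, c
    }
    $dbc close
    $db close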
diff --git a/storage/bdb/test/test075.tcl b/storage/bdb/test/test075.tcl
deleted file mode 100644
index 540d8f0ed73..00000000000
--- a/storage/bdb/test/test075.tcl
+++ /dev/null
@@ -1,205 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2002
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test075.tcl,v 11.21 2002/08/08 15:38:11 bostic Exp $
-#
-# TEST test075
-# TEST Test of DB->rename().
-# TEST (formerly test of DB_TRUNCATE cached page invalidation [#1487])
-proc test075 { method { tnum 75 } args } {
- global encrypt
- global errorCode
- global errorInfo
-
- source ./include.tcl
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- puts "Test0$tnum: $method ($args): Test of DB->rename()"
- # If we are using an env, then testfile should just be the
- # db name. Otherwise it is the test directory and the name.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- # If we are using an env, then skip this test.
- # It needs its own.
- incr eindex
- set env [lindex $args $eindex]
- puts "Skipping test075 for env $env"
- return
- }
- if { $encrypt != 0 } {
- puts "Skipping test075 for security"
- return
- }
-
- # Define absolute pathnames
- set curdir [pwd]
- cd $testdir
- set fulldir [pwd]
- cd $curdir
- set reldir $testdir
-
- # Set up absolute and relative pathnames for test
- set paths [list $fulldir $reldir]
- foreach path $paths {
- puts "\tTest0$tnum: starting test of $path path"
- set oldfile $path/test0$tnum-old.db
- set newfile $path/test0$tnum.db
- set env NULL
- set envargs ""
-
- # Loop through test using the following rename options
- # 1. no environment, not in transaction
- # 2. with environment, not in transaction
- # 3. rename with auto-commit
- # 4. rename in committed transaction
- # 5. rename in aborted transaction
-
- foreach op "noenv env auto commit abort" {
-
- puts "\tTest0$tnum.a: Create/rename file with $op"
-
- # Make sure we're starting with a clean slate.
-
- if { $op == "noenv" } {
- cleanup $path $env
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" \
- [file exists $oldfile] 1
- error_check_bad "$newfile exists" \
- [file exists $newfile] 1
- }
- }
-
- if { $op == "env" } {
- env_cleanup $path
- set env [berkdb_env -create -home $path]
- set envargs "-env $env"
- error_check_good env_open [is_valid_env $env] TRUE
- }
-
- if { $op == "auto" || $op == "commit" || $op == "abort" } {
- env_cleanup $path
- set env [berkdb_env -create -home $path -txn]
- set envargs "-env $env"
- error_check_good env_open [is_valid_env $env] TRUE
- }
-
- puts "\t\tTest0$tnum.a.1: create"
- set db [eval {berkdb_open -create -mode 0644} \
- $omethod $envargs $args $oldfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- if { $env == "NULL" } {
- error_check_bad \
- "$oldfile exists" [file exists $oldfile] 0
- error_check_bad \
- "$newfile exists" [file exists $newfile] 1
- }
-
- # The nature of the key and data is unimportant;
- # use a numeric key so record-based methods don't need
- # special treatment.
- set key 1
- set data [pad_data $method data]
-
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\t\tTest0$tnum.a.2: rename"
- if { $env == "NULL" } {
- error_check_bad \
- "$oldfile exists" [file exists $oldfile] 0
- error_check_bad \
- "$newfile exists" [file exists $newfile] 1
- }
-
- # Regular renames use berkdb dbrename but transaction
- # protected renames must use $env dbrename.
- if { $op == "noenv" || $op == "env" } {
- error_check_good rename_file [eval {berkdb dbrename} \
- $envargs $oldfile $newfile] 0
- } elseif { $op == "auto" } {
- error_check_good rename_file [eval {$env dbrename} \
- -auto_commit $oldfile $newfile] 0
- } else {
- # $op is "abort" or "commit"
- set txn [$env txn]
- error_check_good rename_file [eval {$env dbrename} \
- -txn $txn $oldfile $newfile] 0
- error_check_good txn_$op [$txn $op] 0
- }
-
- if { $env == "NULL" } {
- error_check_bad \
- "$oldfile exists" [file exists $oldfile] 1
- error_check_bad \
- "$newfile exists" [file exists $newfile] 0
- }
-
- puts "\t\tTest0$tnum.a.3: check"
- # Open again with create to make sure we're not caching or
- # anything silly. In the normal case (no env), we already
- # know the file doesn't exist.
- set odb [eval {berkdb_open -create -mode 0644} \
- $envargs $omethod $args $oldfile]
- set ndb [eval {berkdb_open -create -mode 0644} \
- $envargs $omethod $args $newfile]
- error_check_good odb_open [is_valid_db $odb] TRUE
- error_check_good ndb_open [is_valid_db $ndb] TRUE
-
- # The DBT from the "old" database should be empty,
- # not the "new" one, except in the case of an abort.
- set odbt [$odb get $key]
- if { $op == "abort" } {
- error_check_good odbt_has_data [llength $odbt] 1
- } else {
- set ndbt [$ndb get $key]
- error_check_good odbt_empty [llength $odbt] 0
- error_check_bad ndbt_empty [llength $ndbt] 0
- error_check_good ndbt [lindex \
- [lindex $ndbt 0] 1] $data
- }
- error_check_good odb_close [$odb close] 0
- error_check_good ndb_close [$ndb close] 0
-
- # Now there's both an old and a new. Rename the
- # "new" to the "old" and make sure that fails.
- #
- # XXX Ideally we'd do this test even when there's
- # an external environment, but that env has
- # errpfx/errfile set now. :-(
- puts "\tTest0$tnum.b: Make sure rename fails\
- instead of overwriting"
- if { $env != "NULL" } {
- error_check_good env_close [$env close] 0
- set env [berkdb_env_noerr -home $path]
- error_check_good env_open2 \
- [is_valid_env $env] TRUE
- set ret [catch {eval {berkdb dbrename} \
- -env $env $newfile $oldfile} res]
- error_check_bad rename_overwrite $ret 0
- error_check_good rename_overwrite_ret \
- [is_substr $errorCode EEXIST] 1
- }
-
- # Verify and then start over from a clean slate.
- verify_dir $path "\tTest0$tnum.c: "
- cleanup $path $env
- if { $env != "NULL" } {
- error_check_good env_close [$env close] 0
- }
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" \
- [file exists $oldfile] 1
- error_check_bad "$newfile exists" \
- [file exists $newfile] 1
-
- set oldfile test0$tnum-old.db
- set newfile test0$tnum.db
- }
- }
- }
-}
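Outside the harness, the rename flavors the loop walks through look roughly like this. A sketch assuming the berkdb extension, with ./renametest as an arbitrary home directory.

    file mkdir ./renametest
    set env [berkdb_env -create -home ./renametest -txn]
    set db [berkdb_open -create -mode 0644 -env $env -btree old.db]
    $db put 1 somedata
    $db close

    # Transaction-protected rename; commit makes it stick.
    set t [$env txn]
    $env dbrename -txn $t old.db new.db
    $t commit

    # An aborted rename would instead leave the source file in place:
    #   set t [$env txn]
    #   $env dbrename -txn $t new.db old.db
    #   $t abort
    $env close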
diff --git a/storage/bdb/test/test076.tcl b/storage/bdb/test/test076.tcl
deleted file mode 100644
index 49827d35306..00000000000
--- a/storage/bdb/test/test076.tcl
+++ /dev/null
@@ -1,84 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test076.tcl,v 1.22 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test076
-# TEST Test creation of many small databases in a single environment. [#1528].
-proc test076 { method { ndbs 1000 } { tnum "076" } args } {
- global is_qnx_test
- source ./include.tcl
-
- set args [convert_args $method $args]
- set encargs ""
- set args [split_encargs $args encargs]
- set omethod [convert_method $method]
-
- if { [is_record_based $method] == 1 } {
- set key ""
- } else {
- set key "key"
- }
- set data "datamoredatamoredata"
-
- # Create an env if we weren't passed one.
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- if { $eindex == -1 } {
- set deleteenv 1
- env_cleanup $testdir
- set env [eval {berkdb_env -create -home} $testdir $encargs]
- error_check_good env [is_valid_env $env] TRUE
- set args "$args -env $env"
- } else {
- set deleteenv 0
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- if { $ndbs == 1000 } {
- set ndbs 100
- }
- }
- set testdir [get_home $env]
- }
- if { $is_qnx_test && $ndbs > 100 } {
- set ndbs 100
- }
- puts -nonewline "Test$tnum $method ($args): "
- puts -nonewline "Create $ndbs"
- puts " small databases in one env."
-
- cleanup $testdir $env
- set txn ""
-
- for { set i 1 } { $i <= $ndbs } { incr i } {
- set testfile test$tnum.$i.db
-
- set db [eval {berkdb_open -create -mode 0644}\
- $args $omethod $testfile]
- error_check_good db_open($i) [is_valid_db $db] TRUE
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key$i \
- [chop_data $method $data$i]}]
- error_check_good db_put($i) $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close($i) [$db close] 0
- }
-
- if { $deleteenv == 1 } {
- error_check_good env_close [$env close] 0
- }
-
- puts "\tTest$tnum passed."
-}
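Stripped of the harness, the scenario is simply many database files opened against one shared environment. A sketch, assuming the berkdb extension; directory and file names are arbitrary.

    file mkdir ./manydbs
    set env [berkdb_env -create -home ./manydbs]
    for { set i 1 } { $i <= 10 } { incr i } {
        set db [berkdb_open -create -mode 0644 -env $env -btree small.$i.db]
        $db put key$i data$i
        $db close
    }
    $env close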
diff --git a/storage/bdb/test/test077.tcl b/storage/bdb/test/test077.tcl
deleted file mode 100644
index 3c1fd869b22..00000000000
--- a/storage/bdb/test/test077.tcl
+++ /dev/null
@@ -1,93 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test077.tcl,v 1.14 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test077
-# TEST Test of DB_GET_RECNO [#1206].
-proc test077 { method { nkeys 1000 } { tnum "077" } args } {
- source ./include.tcl
- global alphabet
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- puts "Test$tnum: Test of DB_GET_RECNO."
-
- if { [is_rbtree $method] != 1 } {
- puts "\tTest$tnum: Skipping for method $method."
- return
- }
-
- set data $alphabet
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set db [eval {berkdb_open -create -mode 0644} \
- $omethod $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- puts "\tTest$tnum.a: Populating database."
- set txn ""
-
- for { set i 1 } { $i <= $nkeys } { incr i } {
- set key [format %5d $i]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$key $data}]
- error_check_good db_put($key) $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- puts "\tTest$tnum.b: Verifying record numbers."
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE
-
- set i 1
- for { set dbt [$dbc get -first] } \
- { [string length $dbt] != 0 } \
- { set dbt [$dbc get -next] } {
- set recno [$dbc get -get_recno]
- set keynum [expr [lindex [lindex $dbt 0] 0]]
-
- # Verify that i, the number that is the key, and recno
- # are all equal.
- error_check_good key($i) $keynum $i
- error_check_good recno($i) $recno $i
- incr i
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
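DB_GET_RECNO only works on a btree that maintains record numbers, which is why the test runs solely for the rbtree method. A sketch, assuming the berkdb extension and that -recnum is the Tcl open flag corresponding to DB_RECNUM; recno.db is arbitrary.

    set db [berkdb_open -create -mode 0644 -recnum -btree recno.db]
    foreach key {aaa bbb ccc} {
        $db put $key data
    }
    set dbc [$db cursor]
    $dbc get -first
    puts [$dbc get -get_recno]       ;# expected: 1
    $dbc get -next
    puts [$dbc get -get_recno]       ;# expected: 2
    $dbc close
    $db close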
diff --git a/storage/bdb/test/test078.tcl b/storage/bdb/test/test078.tcl
deleted file mode 100644
index 549fc13c7e7..00000000000
--- a/storage/bdb/test/test078.tcl
+++ /dev/null
@@ -1,245 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test078.tcl,v 1.26 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test078
-# TEST Test of DBC->c_count(). [#303]
-proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } {
- source ./include.tcl
- global alphabet
- global is_je_test
- global rand_init
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test$tnum ($method): Test of key counts."
-
- berkdb srand $rand_init
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- }
-
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum-a.db
- set env NULL
- } else {
- set testfile test$tnum-a.db
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- set nkeys 50
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test078: skipping for specific pagesizes"
- return
- }
- puts "\tTest$tnum.a: No duplicates, trivial answer."
- puts "\t\tTest$tnum.a.1: Populate database, verify dup counts."
- set db [eval {berkdb_open -create -mode 0644\
- -pagesize $pagesize} $omethod $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
- set txn ""
-
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$i\
- [pad_data $method $alphabet$i]}]
- error_check_good put.a($i) $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good count.a [$db count $i] 1
- }
-
- if { [is_rrecno $method] == 1 } {
- error_check_good db_close.a [$db close] 0
- puts "\tTest$tnum.a2: Skipping remainder of test078 for -rrecno."
- return
- }
-
- puts "\t\tTest$tnum.a.2: Delete items, verify dup counts again."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db del} $txn $i]
- error_check_good del.a($i) $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good count.a [$db count $i] 0
- }
-
- error_check_good db_close.a [$db close] 0
-
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts \
- "\tTest$tnum.b: Duplicates not supported in $method, skipping."
- return
- }
-
- foreach {let descrip dupopt} \
- {b sorted "-dup -dupsort" c unsorted "-dup"} {
-
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum-b.db
- set env NULL
- } else {
- set testfile test$tnum-b.db
- set env [lindex $args $eindex]
- if { $is_je_test && $dupopt == "-dup" } {
- continue
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- puts "\tTest$tnum.$let: Duplicates ($descrip)."
- puts "\t\tTest$tnum.$let.1: Populating database."
-
- set db [eval {berkdb_open -create -mode 0644\
- -pagesize $pagesize} $dupopt $omethod $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- for { set i 1 } { $i <= $nkeys } { incr i } {
- for { set j 0 } { $j < $i } { incr j } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn \
- [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$i\
- [pad_data $method $j$alphabet]}]
- error_check_good put.$let,$i $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
- }
-
- puts -nonewline "\t\tTest$tnum.$let.2: "
- puts "Verifying duplicate counts."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- error_check_good count.$let,$i \
- [$db count $i] $i
- }
-
- puts -nonewline "\t\tTest$tnum.$let.3: "
- puts "Delete every other dup by cursor, verify counts."
-
- # Delete every other item by cursor and check counts.
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set c [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $c $db] TRUE
- set j 0
-
- for { set ret [$c get -first]} { [llength $ret] > 0 } \
- { set ret [$c get -next]} {
- set key [lindex [lindex $ret 0] 0]
- if { $key == $i } {
- set data [lindex [lindex $ret 0 ] 1]
- set num [string range $data 0 \
- end-[string length $alphabet]]
- if { [expr $num % 2] == 0 } {
- error_check_good \
- c_del [$c del] 0
- incr j
- }
- if { $txnenv == 0 } {
- error_check_good count.$let.$i-$j \
- [$db count $i] [expr $i - $j]
- }
- }
- }
- error_check_good curs_close [$c close] 0
- if { $txnenv == 1 } {
- error_check_good txn_commit [$t commit] 0
- }
- error_check_good count.$let.$i-$j \
- [$db count $i] [expr $i - $j]
- }
-
- puts -nonewline "\t\tTest$tnum.$let.4: "
- puts "Delete all items by cursor, verify counts."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set c [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $c $db] TRUE
- for { set ret [$c get -first]} { [llength $ret] > 0 } \
- { set ret [$c get -next]} {
- set key [lindex [lindex $ret 0] 0]
- if { $key == $i } {
- error_check_good c_del [$c del] 0
- }
- }
- error_check_good curs_close [$c close] 0
- if { $txnenv == 1 } {
- error_check_good txn_commit [$t commit] 0
- }
- error_check_good db_count_zero [$db count $i] 0
- }
-
- puts -nonewline "\t\tTest$tnum.$let.5: "
- puts "Add back one item, verify counts."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {$i\
- [pad_data $method $alphabet]}]
- error_check_good put.$let,$i $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good add_one [$db count $i] 1
- }
-
- puts -nonewline "\t\tTest$tnum.$let.6: "
- puts "Delete remaining entries, verify counts."
- for { set i 1 } { $i <= $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- error_check_good db_del [eval {$db del} $txn {$i}] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good count.$let.$i [$db count $i] 0
- }
- error_check_good db_close.$let [$db close] 0
- }
-}
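For readers unfamiliar with the Tcl API this test drives, the `count` primitive (DBC->c_count() underneath) can be exercised directly. A minimal sketch, assuming the Berkeley DB Tcl extension is already loaded (as the suite's include.tcl arranges) and using made-up file and key names:

    # Open a btree with sorted duplicates; count returns the number of
    # data items stored under a single key.
    set db [berkdb_open -create -mode 0644 -btree -dup -dupsort count.db]
    $db put fruit apple
    $db put fruit banana
    $db put fruit cherry
    puts [$db count fruit]      ;# 3
    $db del fruit               ;# del removes all duplicates of the key
    puts [$db count fruit]      ;# 0
    $db close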
diff --git a/storage/bdb/test/test079.tcl b/storage/bdb/test/test079.tcl
deleted file mode 100644
index d5dbc330409..00000000000
--- a/storage/bdb/test/test079.tcl
+++ /dev/null
@@ -1,29 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test079.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test079
-# TEST Test of deletes in large trees. (test006 w/ sm. pagesize).
-# TEST
-# TEST Check that delete operations work in large btrees. 10000 entries
-# TEST and a pagesize of 512 push this out to a four-level btree, with a
-# TEST small fraction of the entries going on overflow pages.
-proc test079 { method {nentries 10000} {pagesize 512} {tnum "079"} \
- {ndups 20} args} {
- if { [ is_queueext $method ] == 1 } {
- set method "queue";
- lappend args "-extent" "20"
- }
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test$tnum: skipping for specific pagesizes"
- return
- }
-
- eval {test006 $method $nentries 1 $tnum $ndups -pagesize \
- $pagesize} $args
-}
diff --git a/storage/bdb/test/test080.tcl b/storage/bdb/test/test080.tcl
deleted file mode 100644
index 9f649496f68..00000000000
--- a/storage/bdb/test/test080.tcl
+++ /dev/null
@@ -1,126 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2002
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test080.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
-#
-# TEST test080
-# TEST Test of DB->remove()
-proc test080 { method {tnum 80} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test0$tnum: Test of DB->remove()"
-
- # Determine full path
- set curdir [pwd]
- cd $testdir
- set fulldir [pwd]
- cd $curdir
-
- # Test both relative and absolute path
- set paths [list $fulldir $testdir]
-
- # If we are using an env, then skip this test.
- # It needs its own.
- set eindex [lsearch -exact $args "-env"]
- set encargs ""
- set args [split_encargs $args encargs]
- if { $encargs != ""} {
- puts "Skipping test080 for security"
- return
- }
- if { $eindex != -1 } {
- incr eindex
- set e [lindex $args $eindex]
- puts "Skipping test080 for env $e"
- return
- }
-
- foreach path $paths {
-
- set dbfile test0$tnum.db
- set testfile $path/$dbfile
-
- # Loop through test using the following remove options
- # 1. no environment, not in transaction
- # 2. with environment, not in transaction
- # 3. remove with auto-commit
- # 4. remove in committed transaction
- # 5. remove in aborted transaction
-
- foreach op "noenv env auto commit abort" {
-
- # Make sure we're starting with a clean slate.
- env_cleanup $testdir
- if { $op == "noenv" } {
- set dbfile $testfile
- set e NULL
- set envargs ""
- } else {
- if { $op == "env" } {
- set largs ""
- } else {
- set largs " -txn"
- }
- set e [eval {berkdb_env -create -home $path} $largs]
- set envargs "-env $e"
- error_check_good env_open [is_valid_env $e] TRUE
- }
-
- puts "\tTest0$tnum: dbremove with $op in $path"
- puts "\tTest0$tnum.a.1: Create file"
- set db [eval {berkdb_open -create -mode 0644} $omethod \
- $envargs $args {$dbfile}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- # The nature of the key and data is unimportant;
- # use a numeric key so record-based methods don't need
- # special treatment.
- set key 1
- set data [pad_data $method data]
-
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
- error_check_good file_exists_before \
- [file exists $testfile] 1
-
- # Use berkdb dbremove for non-transactional tests
- # and $env dbremove for transactional tests
- puts "\tTest0$tnum.a.2: Remove file"
- if { $op == "noenv" || $op == "env" } {
- error_check_good remove_$op \
- [eval {berkdb dbremove} $envargs $dbfile] 0
- } elseif { $op == "auto" } {
- error_check_good remove_$op \
- [eval {$e dbremove} -auto_commit $dbfile] 0
- } else {
- # $op is "abort" or "commit"
- set txn [$e txn]
- error_check_good remove_$op \
- [eval {$e dbremove} -txn $txn $dbfile] 0
- error_check_good txn_$op [$txn $op] 0
- }
-
- puts "\tTest0$tnum.a.3: Check that file is gone"
- # File should now be gone, except in the case of an abort.
- if { $op != "abort" } {
- error_check_good exists_after \
- [file exists $testfile] 0
- } else {
- error_check_good exists_after \
- [file exists $testfile] 1
- }
-
- if { $e != "NULL" } {
- error_check_good env_close [$e close] 0
- }
-
- set dbfile test0$tnum-old.db
- set testfile $path/$dbfile
- }
- }
-}
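The remove semantics checked above reduce to a small pattern: a dbremove in a transaction that aborts leaves the file in place, while an auto-commit dbremove takes effect immediately. A rough standalone sketch, with hypothetical names, assuming the Tcl extension is loaded and ./scratch is an empty directory:

    set env [berkdb_env -create -home scratch -txn]
    set db [berkdb_open -auto_commit -create -env $env -mode 0644 -btree gone.db]
    set t [$env txn]
    $db put -txn $t somekey somedata
    $t commit
    $db close

    # Remove inside a transaction that aborts: the file survives.
    set t [$env txn]
    $env dbremove -txn $t gone.db
    $t abort
    puts [file exists scratch/gone.db]   ;# 1

    # Remove with auto-commit: the file is gone.
    $env dbremove -auto_commit gone.db
    puts [file exists scratch/gone.db]   ;# 0
    $env close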
diff --git a/storage/bdb/test/test081.tcl b/storage/bdb/test/test081.tcl
deleted file mode 100644
index a9c7f5cfc86..00000000000
--- a/storage/bdb/test/test081.tcl
+++ /dev/null
@@ -1,15 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test081.tcl,v 11.9 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test081
-# TEST Test off-page duplicates and overflow pages together with
-# TEST very large keys (key/data as file contents).
-proc test081 { method {ndups 13} {tnum "081"} args} {
- source ./include.tcl
-
- eval {test017 $method 1 $ndups $tnum} $args
-}
diff --git a/storage/bdb/test/test082.tcl b/storage/bdb/test/test082.tcl
deleted file mode 100644
index fb4d71c5c49..00000000000
--- a/storage/bdb/test/test082.tcl
+++ /dev/null
@@ -1,14 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test082.tcl,v 11.8 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test082
-# TEST Test of DB_PREV_NODUP (uses test074).
-proc test082 { method {dir -prevnodup} {nitems 100} {tnum "082"} args} {
- source ./include.tcl
-
- eval {test074 $method $dir $nitems $tnum} $args
-}
diff --git a/storage/bdb/test/test083.tcl b/storage/bdb/test/test083.tcl
deleted file mode 100644
index 7e4a8b960a4..00000000000
--- a/storage/bdb/test/test083.tcl
+++ /dev/null
@@ -1,166 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test083.tcl,v 11.16 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test083
-# TEST Test of DB->key_range.
-proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
- source ./include.tcl
-
- global rand_init
- error_check_good set_random_seed [berkdb srand $rand_init] 0
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- puts "Test083 $method ($args): Test of DB->key_range"
- if { [is_btree $method] != 1 } {
- puts "\tTest083: Skipping for method $method."
- return
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test083: skipping for specific pagesizes"
- return
- }
-
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- if { $eindex == -1 } {
- set testfile $testdir/test083.db
- set env NULL
- } else {
- set testfile test083.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
-
- # We assume that numbers will be at most six digits wide
- error_check_bad maxitems_range [expr $maxitems > 999999] 1
-
- # We want to test key_range on a variety of sizes of btree.
- # Start at ten keys and work up to $maxitems keys, at each step
- # multiplying the number of keys by $step.
- for { set nitems 10 } { $nitems <= $maxitems }\
- { set nitems [expr $nitems * $step] } {
-
- puts "\tTest083.a: Opening new database"
- if { $env != "NULL"} {
- set testdir [get_home $env]
- }
- cleanup $testdir $env
- set db [eval {berkdb_open -create -mode 0644} \
- -pagesize $pgsz $omethod $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- t83_build $db $nitems $env $txnenv
- t83_test $db $nitems $env $txnenv
-
- error_check_good db_close [$db close] 0
- }
-}
-
-proc t83_build { db nitems env txnenv } {
- source ./include.tcl
-
- puts "\tTest083.b: Populating database with $nitems keys"
-
- set keylist {}
- puts "\t\tTest083.b.1: Generating key list"
- for { set i 0 } { $i < $nitems } { incr i } {
- lappend keylist $i
- }
-
- # With randomly ordered insertions, the range of errors we
- # get from key_range can be unpredictably high [#2134]. For now,
- # just skip the randomization step.
- #puts "\t\tTest083.b.2: Randomizing key list"
- #set keylist [randomize_list $keylist]
- #puts "\t\tTest083.b.3: Populating database with randomized keys"
-
- puts "\t\tTest083.b.2: Populating database"
- set data [repeat . 50]
- set txn ""
- foreach keynum $keylist {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {key[format %6d $keynum] $data}]
- error_check_good db_put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-}
-
-proc t83_test { db nitems env txnenv } {
- # Look at the first key, then at keys about 1/4, 1/2, 3/4, and
- # all the way through the database. Make sure the key_ranges
- # aren't off by more than 10%.
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- } else {
- set txn ""
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good dbc [is_valid_cursor $dbc $db] TRUE
-
- puts "\tTest083.c: Verifying ranges..."
-
- for { set i 0 } { $i < $nitems } \
- { incr i [expr $nitems / [berkdb random_int 3 16]] } {
- puts "\t\t...key $i"
- error_check_bad key0 [llength [set dbt [$dbc get -first]]] 0
-
- for { set j 0 } { $j < $i } { incr j } {
- error_check_bad key$j \
- [llength [set dbt [$dbc get -next]]] 0
- }
-
- set ranges [$db keyrange [lindex [lindex $dbt 0] 0]]
-
- #puts $ranges
- error_check_good howmanyranges [llength $ranges] 3
-
- set lessthan [lindex $ranges 0]
- set morethan [lindex $ranges 2]
-
- set rangesum [expr $lessthan + [lindex $ranges 1] + $morethan]
-
- roughly_equal $rangesum 1 0.05
-
- # Wild guess.
- if { $nitems < 500 } {
- set tol 0.3
- } else {
- set tol 0.15
- }
-
- roughly_equal $lessthan [expr $i * 1.0 / $nitems] $tol
-
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-}
-
-proc roughly_equal { a b tol } {
- error_check_good "$a =~ $b" [expr abs($a - $b) < $tol] 1
-}
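The keyrange call at the heart of t83_test returns a three-element list: the estimated fractions of keys less than, equal to, and greater than the argument, which should sum to roughly 1.0. A minimal sketch with invented key names (btree only; assumes the Tcl extension is loaded):

    set db [berkdb_open -create -mode 0644 -btree range.db]
    for { set i 0 } { $i < 100 } { incr i } {
        $db put [format key%03d $i] payload
    }
    # Expect something near {0.5 0.01 0.49} for the middle key.
    puts [$db keyrange key050]
    $db close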
diff --git a/storage/bdb/test/test084.tcl b/storage/bdb/test/test084.tcl
deleted file mode 100644
index 036c1c41118..00000000000
--- a/storage/bdb/test/test084.tcl
+++ /dev/null
@@ -1,53 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test084.tcl,v 11.15 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test084
-# TEST Basic sanity test (test001) with large (64K) pages.
-proc test084 { method {nentries 10000} {tnum "084"} {pagesize 65536} args} {
- source ./include.tcl
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum-empty.db
- set env NULL
- } else {
- set testfile test$tnum-empty.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test084: skipping for specific pagesizes"
- return
- }
-
- cleanup $testdir $env
-
- set args "-pagesize $pagesize $args"
-
- eval {test001 $method $nentries 0 0 $tnum} $args
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- # For good measure, create a second database that's empty
- # with the large page size. (There was a verifier bug that
- # choked on empty 64K pages. [#2408])
- set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile]
- error_check_good empty_db [is_valid_db $db] TRUE
- error_check_good empty_db_close [$db close] 0
-}
diff --git a/storage/bdb/test/test085.tcl b/storage/bdb/test/test085.tcl
deleted file mode 100644
index 373db33a5ad..00000000000
--- a/storage/bdb/test/test085.tcl
+++ /dev/null
@@ -1,331 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test085.tcl,v 1.18 2004/09/20 17:29:32 carol Exp $
-#
-# TEST test085
-# TEST Test of cursor behavior when a cursor is pointing to a deleted
-# TEST btree key which then has duplicates added. [#2473]
-proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum "085"} args } {
- source ./include.tcl
- global alphabet
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
- set encargs ""
- set args [split_encargs $args encargs]
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
-
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test085: skipping for specific pagesizes"
- return
- }
- cleanup $testdir $env
-
- # Keys must sort $prekey < $key < $postkey.
- set prekey "AA"
- set key "BBB"
- set postkey "CCCC"
-
- # Make these distinguishable from each other and from the
- # alphabets used for the $key's data.
- set predatum "1234567890"
- set datum $alphabet
- set postdatum "0987654321"
- set txn ""
-
- append args " -pagesize $pagesize -dup"
-
- puts -nonewline "Test$tnum $omethod ($args): "
-
- # Skip for all non-btrees. (Rbtrees don't count as btrees, for
- # now, since they don't support dups.)
- if { [is_btree $method] != 1 } {
- puts "Skipping for method $method."
- return
- } else {
- puts "Duplicates w/ deleted item cursor."
- }
-
- # Repeat the test with both on-page and off-page numbers of dups.
- foreach ndups "$onp $offp" {
- # Put operations we want to test on a cursor set to the
- # deleted item, the key to use with them, and what should
- # come before and after them given a placement of
- # the deleted item at the beginning or end of the dupset.
- set final [expr $ndups - 1]
- set putops {
- {{-before} "" $predatum {[test085_ddatum 0]} beginning}
- {{-before} "" {[test085_ddatum $final]} $postdatum end}
- {{-keyfirst} $key $predatum {[test085_ddatum 0]} beginning}
- {{-keyfirst} $key $predatum {[test085_ddatum 0]} end}
- {{-keylast} $key {[test085_ddatum $final]} $postdatum beginning}
- {{-keylast} $key {[test085_ddatum $final]} $postdatum end}
- {{-after} "" $predatum {[test085_ddatum 0]} beginning}
- {{-after} "" {[test085_ddatum $final]} $postdatum end}
- }
-
- # Get operations we want to test on a cursor set to the
- # deleted item, any args to get, and the expected key/data pair.
- set getops {
- {{-current} "" "" "" beginning}
- {{-current} "" "" "" end}
- {{-next} "" $key {[test085_ddatum 0]} beginning}
- {{-next} "" $postkey $postdatum end}
- {{-prev} "" $prekey $predatum beginning}
- {{-prev} "" $key {[test085_ddatum $final]} end}
- {{-first} "" $prekey $predatum beginning}
- {{-first} "" $prekey $predatum end}
- {{-last} "" $postkey $postdatum beginning}
- {{-last} "" $postkey $postdatum end}
- {{-nextdup} "" $key {[test085_ddatum 0]} beginning}
- {{-nextdup} "" EMPTYLIST "" end}
- {{-nextnodup} "" $postkey $postdatum beginning}
- {{-nextnodup} "" $postkey $postdatum end}
- {{-prevnodup} "" $prekey $predatum beginning}
- {{-prevnodup} "" $prekey $predatum end}
- }
-
- set txn ""
- foreach pair $getops {
- set op [lindex $pair 0]
- puts "\tTest$tnum: Get ($op) with $ndups duplicates,\
- cursor at the [lindex $pair 4]."
- set db [eval {berkdb_open -create \
- -mode 0644} $omethod $encargs $args $testfile]
- error_check_good "db open" [is_valid_db $db] TRUE
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn \
- [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [test085_setup $db $txn]
-
- set beginning [expr [string compare \
- [lindex $pair 4] "beginning"] == 0]
-
- for { set i 0 } { $i < $ndups } { incr i } {
- if { $beginning } {
- error_check_good db_put($i) \
- [eval {$db put} $txn \
- {$key [test085_ddatum $i]}] 0
- } else {
- set c [eval {$db cursor} $txn]
- set j [expr $ndups - $i - 1]
- error_check_good db_cursor($j) \
- [is_valid_cursor $c $db] TRUE
- set d [test085_ddatum $j]
- error_check_good dbc_put($j) \
- [$c put -keyfirst $key $d] 0
- error_check_good c_close [$c close] 0
- }
- }
-
- set gargs [lindex $pair 1]
- set ekey ""
- set edata ""
- eval set ekey [lindex $pair 2]
- eval set edata [lindex $pair 3]
-
- set dbt [eval $dbc get $op $gargs]
- if { [string compare $ekey EMPTYLIST] == 0 || \
- [string compare $op -current] == 0 } {
- error_check_good dbt($op,$ndups) \
- [llength $dbt] 0
- } else {
- error_check_good dbt($op,$ndups) $dbt \
- [list [list $ekey $edata]]
- }
- error_check_good "dbc close" [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good "db close" [$db close] 0
- verify_dir $testdir "\t\t"
-
- # Remove testfile so we can do without truncate flag.
- # This is okay because we've already done verify and
- # dump/load.
- if { $env == "NULL" } {
- set ret [eval {berkdb dbremove} \
- $encargs $testfile]
- } elseif { $txnenv == 1 } {
- set ret [eval "$env dbremove" \
- -auto_commit $encargs $testfile]
- } else {
- set ret [eval {berkdb dbremove} \
- -env $env $encargs $testfile]
- }
- error_check_good dbremove $ret 0
-
- }
-
- foreach pair $putops {
- # Open and set up database.
- set op [lindex $pair 0]
- puts "\tTest$tnum: Put ($op) with $ndups duplicates,\
- cursor at the [lindex $pair 4]."
- set db [eval {berkdb_open -create \
- -mode 0644} $omethod $args $encargs $testfile]
- error_check_good "db open" [is_valid_db $db] TRUE
-
- set beginning [expr [string compare \
- [lindex $pair 4] "beginning"] == 0]
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [test085_setup $db $txn]
-
- # Put duplicates.
- for { set i 0 } { $i < $ndups } { incr i } {
- if { $beginning } {
- error_check_good db_put($i) \
- [eval {$db put} $txn \
- {$key [test085_ddatum $i]}] 0
- } else {
- set c [eval {$db cursor} $txn]
- set j [expr $ndups - $i - 1]
- error_check_good db_cursor($j) \
- [is_valid_cursor $c $db] TRUE
- set d [test085_ddatum $j]
- error_check_good dbc_put($j) \
- [$c put -keyfirst $key $d] 0
- error_check_good c_close [$c close] 0
- }
- }
-
- # Set up cursors for stability test.
- set pre_dbc [eval {$db cursor} $txn]
- error_check_good pre_set [$pre_dbc get -set $prekey] \
- [list [list $prekey $predatum]]
- set post_dbc [eval {$db cursor} $txn]
- error_check_good post_set [$post_dbc get -set $postkey]\
- [list [list $postkey $postdatum]]
- set first_dbc [eval {$db cursor} $txn]
- error_check_good first_set \
- [$first_dbc get -get_both $key [test085_ddatum 0]] \
- [list [list $key [test085_ddatum 0]]]
- set last_dbc [eval {$db cursor} $txn]
- error_check_good last_set \
- [$last_dbc get -get_both $key [test085_ddatum \
- [expr $ndups - 1]]] \
- [list [list $key [test085_ddatum [expr $ndups -1]]]]
-
- set k [lindex $pair 1]
- set d_before ""
- set d_after ""
- eval set d_before [lindex $pair 2]
- eval set d_after [lindex $pair 3]
- set newdatum "NewDatum"
- error_check_good dbc_put($op,$ndups) \
- [eval $dbc put $op $k $newdatum] 0
- error_check_good dbc_prev($op,$ndups) \
- [lindex [lindex [$dbc get -prev] 0] 1] \
- $d_before
- error_check_good dbc_current($op,$ndups) \
- [lindex [lindex [$dbc get -next] 0] 1] \
- $newdatum
-
- error_check_good dbc_next($op,$ndups) \
- [lindex [lindex [$dbc get -next] 0] 1] \
- $d_after
-
- # Verify stability of pre- and post- cursors.
- error_check_good pre_stable [$pre_dbc get -current] \
- [list [list $prekey $predatum]]
- error_check_good post_stable [$post_dbc get -current] \
- [list [list $postkey $postdatum]]
- error_check_good first_stable \
- [$first_dbc get -current] \
- [list [list $key [test085_ddatum 0]]]
- error_check_good last_stable \
- [$last_dbc get -current] \
- [list [list $key [test085_ddatum [expr $ndups -1]]]]
-
- foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" {
- error_check_good ${c}_close [$c close] 0
- }
-
- error_check_good "dbc close" [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good "db close" [$db close] 0
- verify_dir $testdir "\t\t"
-
- # Remove testfile so we can do without truncate flag.
- # This is okay because we've already done verify and
- # dump/load.
- if { $env == "NULL" } {
- set ret [eval {berkdb dbremove} \
- $encargs $testfile]
- } elseif { $txnenv == 1 } {
- set ret [eval "$env dbremove" \
- -auto_commit $encargs $testfile]
- } else {
- set ret [eval {berkdb dbremove} \
- -env $env $encargs $testfile]
- }
- error_check_good dbremove $ret 0
- }
- }
-}
-
-# Set up the test database; put $prekey, $key, and $postkey with their
-# respective data, and then delete $key with a new cursor. Return that
-# cursor, still pointing to the deleted item.
-proc test085_setup { db txn } {
- upvar key key
- upvar prekey prekey
- upvar postkey postkey
- upvar predatum predatum
- upvar postdatum postdatum
-
- # no one else should ever see this one!
- set datum "bbbbbbbb"
-
- error_check_good pre_put [eval {$db put} $txn {$prekey $predatum}] 0
- error_check_good main_put [eval {$db put} $txn {$key $datum}] 0
- error_check_good post_put [eval {$db put} $txn {$postkey $postdatum}] 0
-
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- error_check_good dbc_getset [$dbc get -get_both $key $datum] \
- [list [list $key $datum]]
-
- error_check_good dbc_del [$dbc del] 0
-
- return $dbc
-}
-
-proc test085_ddatum { a } {
- global alphabet
- return $a$alphabet
-}
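The core setup above boils down to parking a cursor on an item and deleting it through that cursor; the cursor then marks a phantom position that gets and puts operate relative to. A rough sketch of that behavior, with made-up keys, assuming the Tcl extension is loaded:

    set db [berkdb_open -create -mode 0644 -btree -dup del.db]
    $db put AA pre
    $db put BBB gone
    $db put CCCC post

    set dbc [$db cursor]
    $dbc get -get_both BBB gone   ;# position on the middle item
    $dbc del                      ;# cursor now points at a deleted item

    puts [$dbc get -current]      ;# {} -- the slot is empty
    puts [$dbc get -next]         ;# {{CCCC post}} -- steps past the phantom
    $dbc close
    $db close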
diff --git a/storage/bdb/test/test086.tcl b/storage/bdb/test/test086.tcl
deleted file mode 100644
index 8b2f7db81be..00000000000
--- a/storage/bdb/test/test086.tcl
+++ /dev/null
@@ -1,166 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test086.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test086
-# TEST Test of cursor stability across btree splits/rsplits with
-# TEST subtransaction aborts (a variant of test048). [#2373]
-proc test086 { method args } {
- global errorCode
- source ./include.tcl
-
- set tnum 086
- set args [convert_args $method $args]
- set encargs ""
- set args [split_encargs $args encargs]
-
- if { [is_btree $method] != 1 } {
- puts "Test$tnum skipping for method $method."
- return
- }
-
- set method "-btree"
-
- puts "\tTest$tnum: Test of cursor stability across aborted\
- btree splits."
-
- set key "key"
- set data "data"
- set txn ""
- set flags ""
-
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then this test won't work.
- if { $eindex == -1 } {
- # But we will be using our own env...
- set testfile test$tnum.db
- } else {
- puts "\tTest$tnum: Environment provided; skipping test."
- return
- }
- set t1 $testdir/t1
- env_cleanup $testdir
-
- set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
- error_check_good berkdb_env [is_valid_env $env] TRUE
-
- puts "\tTest$tnum.a: Create $method database."
- set oflags "-auto_commit -create -env $env -mode 0644 $args $method"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set nkeys 5
- # Fill page w/ small key/data pairs, keep at leaf
- #
- puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs."
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
- for { set i 0 } { $i < $nkeys } { incr i } {
- set ret [$db put -txn $txn key000$i $data$i]
- error_check_good dbput $ret 0
- }
- error_check_good commit [$txn commit] 0
-
- # get db ordering, set cursors
- puts "\tTest$tnum.c: Set cursors on each of $nkeys pairs."
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
- for {set i 0; set ret [$db get -txn $txn key000$i]} {\
- $i < $nkeys && [llength $ret] != 0} {\
- incr i; set ret [$db get -txn $txn key000$i]} {
- set key_set($i) [lindex [lindex $ret 0] 0]
- set data_set($i) [lindex [lindex $ret 0] 1]
- set dbc [$db cursor -txn $txn]
- set dbc_set($i) $dbc
- error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
- set ret [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc_set($i)_get:set [llength $ret] 0
- }
-
- # Create child txn.
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
-
- # if mkeys is above 1000, need to adjust below for lexical order
- set mkeys 1000
- puts "\tTest$tnum.d: Add $mkeys pairs to force split."
- for {set i $nkeys} { $i < $mkeys } { incr i } {
- if { $i >= 100 } {
- set ret [$db put -txn $ctxn key0$i $data$i]
- } elseif { $i >= 10 } {
- set ret [$db put -txn $ctxn key00$i $data$i]
- } else {
- set ret [$db put -txn $ctxn key000$i $data$i]
- }
- error_check_good dbput:more $ret 0
- }
-
- puts "\tTest$tnum.e: Abort."
- error_check_good ctxn_abort [$ctxn abort] 0
-
- puts "\tTest$tnum.f: Check and see that cursors maintained reference."
- for {set i 0} { $i < $nkeys } {incr i} {
- set ret [$dbc_set($i) get -current]
- error_check_bad dbc$i:get:current [llength $ret] 0
- set ret2 [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc$i:get:set [llength $ret2] 0
- error_check_good dbc$i:get(match) $ret $ret2
- }
-
- # Put (and this time keep) the keys that caused the split.
- # We'll delete them to test reverse splits.
- puts "\tTest$tnum.g: Put back added keys."
- for {set i $nkeys} { $i < $mkeys } { incr i } {
- if { $i >= 100 } {
- set ret [$db put -txn $txn key0$i $data$i]
- } elseif { $i >= 10 } {
- set ret [$db put -txn $txn key00$i $data$i]
- } else {
- set ret [$db put -txn $txn key000$i $data$i]
- }
- error_check_good dbput:more $ret 0
- }
-
- puts "\tTest$tnum.h: Delete added keys to force reverse split."
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
- for {set i $nkeys} { $i < $mkeys } { incr i } {
- if { $i >= 100 } {
- error_check_good db_del:$i [$db del -txn $ctxn key0$i] 0
- } elseif { $i >= 10 } {
- error_check_good db_del:$i \
- [$db del -txn $ctxn key00$i] 0
- } else {
- error_check_good db_del:$i \
- [$db del -txn $ctxn key000$i] 0
- }
- }
-
- puts "\tTest$tnum.i: Abort."
- error_check_good ctxn_abort [$ctxn abort] 0
-
- puts "\tTest$tnum.j: Verify cursor reference."
- for {set i 0} { $i < $nkeys } {incr i} {
- set ret [$dbc_set($i) get -current]
- error_check_bad dbc$i:get:current [llength $ret] 0
- set ret2 [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc$i:get:set [llength $ret2] 0
- error_check_good dbc$i:get(match) $ret $ret2
- }
-
- puts "\tTest$tnum.j: Cleanup."
- # close cursors
- for {set i 0} { $i < $nkeys } {incr i} {
- error_check_good dbc_close:$i [$dbc_set($i) close] 0
- }
-
- error_check_good commit [$txn commit] 0
- error_check_good dbclose [$db close] 0
- error_check_good envclose [$env close] 0
-
- puts "\tTest$tnum complete."
-}
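The subtransaction machinery this test leans on is just the -parent flag: work done in a child transaction vanishes when the child aborts, while the parent's own updates stay visible to the parent. A minimal sketch (hypothetical names; assumes the Tcl extension is loaded and an empty ./scratch):

    set env [berkdb_env -create -home scratch -txn]
    set db [berkdb_open -auto_commit -create -env $env -mode 0644 -btree nest.db]

    set parent [$env txn]
    $db put -txn $parent stable yes

    set child [$env txn -parent $parent]
    $db put -txn $child doomed yes
    $child abort                            ;# undoes only the child's put

    puts [$db get -txn $parent stable]      ;# {{stable yes}}
    puts [$db get -txn $parent doomed]      ;# {}
    $parent commit
    $db close
    $env close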
diff --git a/storage/bdb/test/test087.tcl b/storage/bdb/test/test087.tcl
deleted file mode 100644
index 7501f4ce3f6..00000000000
--- a/storage/bdb/test/test087.tcl
+++ /dev/null
@@ -1,290 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test087.tcl,v 11.19 2004/01/28 03:36:31 bostic Exp $
-#
-# TEST test087
-# TEST Test of cursor stability when converting to and modifying
-# TEST off-page duplicate pages with subtransaction aborts. [#2373]
-# TEST
-# TEST Does the following:
-# TEST a. Initialize things by DB->putting ndups dups and
-# TEST setting a reference cursor to point to each. Do each put twice,
-# TEST first aborting, then committing, so we're sure to abort the move
-# TEST to off-page dups at some point.
-# TEST b. c_put ndups dups (and correspondingly expanding
-# TEST the set of reference cursors) after the last one, making sure
-# TEST after each step that all the reference cursors still point to
-# TEST the right item.
-# TEST c. Ditto, but before the first one.
-# TEST d. Ditto, but after each one in sequence first to last.
-# TEST e. Ditto, but after each one in sequence from last to first
-# TEST (checking where the reference cursors occur relative to the new datum).
-# TEST f. Ditto for the two sequence tests, only doing a
-# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
-# TEST new one.
-proc test087 { method {pagesize 512} {ndups 50} {tnum "087"} args } {
- source ./include.tcl
- global alphabet
-
- set args [convert_args $method $args]
- set encargs ""
- set args [split_encargs $args encargs]
- set omethod [convert_method $method]
-
- puts "Test$tnum $omethod ($args): "
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then return
- if { $eindex != -1 } {
- puts "Environment specified; skipping."
- return
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test087: skipping for specific pagesizes"
- return
- }
- env_cleanup $testdir
- set testfile test$tnum.db
- set key "the key"
- append args " -pagesize $pagesize -dup"
-
- if { [is_record_based $method] || [is_rbtree $method] } {
- puts "Skipping for method $method."
- return
- } else {
- puts "Test$tnum: Cursor stability on dup. pages w/ aborts."
- }
-
- set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
- error_check_good env_create [is_valid_env $env] TRUE
-
- set db [eval {berkdb_open -auto_commit \
- -create -env $env -mode 0644} $omethod $args $testfile]
- error_check_good "db open" [is_valid_db $db] TRUE
-
- # Number of outstanding keys.
- set keys $ndups
-
- puts "\tTest$tnum.a: put/abort/put/commit loop;\
- $ndups dups, short data."
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
- for { set i 0 } { $i < $ndups } { incr i } {
- set datum [makedatum_t73 $i 0]
-
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn(abort,$i) [is_valid_txn $ctxn $env] TRUE
- error_check_good "db put/abort ($i)" \
- [$db put -txn $ctxn $key $datum] 0
- error_check_good ctxn_abort($i) [$ctxn abort] 0
-
- verify_t73 is_long dbc [expr $i - 1] $key
-
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn(commit,$i) [is_valid_txn $ctxn $env] TRUE
- error_check_good "db put/commit ($i)" \
- [$db put -txn $ctxn $key $datum] 0
- error_check_good ctxn_commit($i) [$ctxn commit] 0
-
- set is_long($i) 0
-
- set dbc($i) [$db cursor -txn $txn]
- error_check_good "db cursor ($i)"\
- [is_valid_cursor $dbc($i) $db] TRUE
- error_check_good "dbc get -get_both ($i)"\
- [$dbc($i) get -get_both $key $datum]\
- [list [list $key $datum]]
-
- verify_t73 is_long dbc $i $key
- }
-
- puts "\tTest$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
- short data."
-
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
- for { set i 0 } { $i < $ndups } { incr i } {
- # !!! keys contains the number of the next dup
- # to be added (since they start from zero)
- set datum [makedatum_t73 $keys 0]
- set curs [$db cursor -txn $ctxn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
- error_check_good "c_put(DB_KEYLAST, $keys)"\
- [$curs put -keylast $key $datum] 0
-
- # We can't do a verification while a child txn is active,
- # or we'll run into trouble when DEBUG_ROP is enabled.
- # If this test has trouble, though, uncommenting this
- # might be illuminating--it makes things a bit more rigorous
- # and works fine when DEBUG_ROP is not enabled.
- # verify_t73 is_long dbc $keys $key
- error_check_good curs_close [$curs close] 0
- }
- error_check_good ctxn_abort [$ctxn abort] 0
- verify_t73 is_long dbc $keys $key
-
- puts "\tTest$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
- short data."
-
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
- for { set i 0 } { $i < $ndups } { incr i } {
- # !!! keys contains the number of the next dup
- # to be added (since they start from zero)
-
- set datum [makedatum_t73 $keys 0]
- set curs [$db cursor -txn $ctxn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
- error_check_good "c_put(DB_KEYFIRST, $keys)"\
- [$curs put -keyfirst $key $datum] 0
-
- # verify_t73 is_long dbc $keys $key
- error_check_good curs_close [$curs close] 0
- }
- # verify_t73 is_long dbc $keys $key
- error_check_good ctxn_abort [$ctxn abort] 0
- verify_t73 is_long dbc $keys $key
-
- puts "\tTest$tnum.d: Cursor put (DB_AFTER) first to last;\
- $keys new dups, short data"
- # We want to add a datum after each key from 0 to the current
- # value of $keys, which we thus need to save.
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
- set keysnow $keys
- for { set i 0 } { $i < $keysnow } { incr i } {
- set datum [makedatum_t73 $keys 0]
- set curs [$db cursor -txn $ctxn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
-
- # Which datum to insert this guy after.
- set curdatum [makedatum_t73 $i 0]
- error_check_good "c_get(DB_GET_BOTH, $i)"\
- [$curs get -get_both $key $curdatum]\
- [list [list $key $curdatum]]
- error_check_good "c_put(DB_AFTER, $i)"\
- [$curs put -after $datum] 0
-
- # verify_t73 is_long dbc $keys $key
- error_check_good curs_close [$curs close] 0
- }
- error_check_good ctxn_abort [$ctxn abort] 0
- verify_t73 is_long dbc $keys $key
-
- puts "\tTest$tnum.e: Cursor put (DB_BEFORE) last to first;\
- $keys new dups, short data"
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
- for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
- set datum [makedatum_t73 $keys 0]
- set curs [$db cursor -txn $ctxn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
-
- # Which datum to insert this guy before.
- set curdatum [makedatum_t73 $i 0]
- error_check_good "c_get(DB_GET_BOTH, $i)"\
- [$curs get -get_both $key $curdatum]\
- [list [list $key $curdatum]]
- error_check_good "c_put(DB_BEFORE, $i)"\
- [$curs put -before $datum] 0
-
- # verify_t73 is_long dbc $keys $key
- error_check_good curs_close [$curs close] 0
- }
- error_check_good ctxn_abort [$ctxn abort] 0
- verify_t73 is_long dbc $keys $key
-
- puts "\tTest$tnum.f: Cursor put (DB_CURRENT), first to last,\
- growing $keys data."
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
- for { set i 0 } { $i < $keysnow } { incr i } {
- set olddatum [makedatum_t73 $i 0]
- set newdatum [makedatum_t73 $i 1]
- set curs [$db cursor -txn $ctxn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db]\
- TRUE
-
- error_check_good "c_get(DB_GET_BOTH, $i)"\
- [$curs get -get_both $key $olddatum]\
- [list [list $key $olddatum]]
- error_check_good "c_put(DB_CURRENT, $i)"\
- [$curs put -current $newdatum] 0
-
- set is_long($i) 1
-
- # verify_t73 is_long dbc $keys $key
- error_check_good curs_close [$curs close] 0
- }
- error_check_good ctxn_abort [$ctxn abort] 0
- for { set i 0 } { $i < $keysnow } { incr i } {
- set is_long($i) 0
- }
- verify_t73 is_long dbc $keys $key
-
- # Now delete the first item, abort the deletion, and make sure
- # we're still sane.
- puts "\tTest$tnum.g: Cursor delete first item, then abort delete."
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
- set curs [$db cursor -txn $ctxn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
- set datum [makedatum_t73 0 0]
- error_check_good "c_get(DB_GET_BOTH, 0)"\
- [$curs get -get_both $key $datum] [list [list $key $datum]]
- error_check_good "c_del(0)" [$curs del] 0
- error_check_good curs_close [$curs close] 0
- error_check_good ctxn_abort [$ctxn abort] 0
- verify_t73 is_long dbc $keys $key
-
- # Ditto, for the last item.
- puts "\tTest$tnum.h: Cursor delete last item, then abort delete."
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
- set curs [$db cursor -txn $ctxn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
- set datum [makedatum_t73 [expr $keys - 1] 0]
- error_check_good "c_get(DB_GET_BOTH, [expr $keys - 1])"\
- [$curs get -get_both $key $datum] [list [list $key $datum]]
- error_check_good "c_del(0)" [$curs del] 0
- error_check_good curs_close [$curs close] 0
- error_check_good ctxn_abort [$ctxn abort] 0
- verify_t73 is_long dbc $keys $key
-
- # Ditto, for all the items.
- puts "\tTest$tnum.i: Cursor delete all items, then abort delete."
- set ctxn [$env txn -parent $txn]
- error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
- set curs [$db cursor -txn $ctxn]
- error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
- set datum [makedatum_t73 0 0]
- error_check_good "c_get(DB_GET_BOTH, 0)"\
- [$curs get -get_both $key $datum] [list [list $key $datum]]
- error_check_good "c_del(0)" [$curs del] 0
- for { set i 1 } { $i < $keys } { incr i } {
- error_check_good "c_get(DB_NEXT, $i)"\
- [$curs get -next] [list [list $key [makedatum_t73 $i 0]]]
- error_check_good "c_del($i)" [$curs del] 0
- }
- error_check_good curs_close [$curs close] 0
- error_check_good ctxn_abort [$ctxn abort] 0
- verify_t73 is_long dbc $keys $key
-
- # Close cursors.
- puts "\tTest$tnum.j: Closing cursors."
- for { set i 0 } { $i < $keys } { incr i } {
- error_check_good "dbc close ($i)" [$dbc($i) close] 0
- }
- error_check_good txn_commit [$txn commit] 0
- error_check_good "db close" [$db close] 0
- error_check_good "env close" [$env close] 0
-}
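The cursor put flags stepped through above map onto a duplicate set as follows; a rough sketch with invented keys (unsorted dups; assumes the Tcl extension is loaded):

    set db [berkdb_open -create -mode 0644 -btree -dup dupput.db]
    set dbc [$db cursor]

    $dbc put -keyfirst k first     ;# head of k's duplicate set
    $dbc put -keylast k last       ;# tail of the set
    $dbc get -get_both k first     ;# position the cursor on "first"
    $dbc put -after middle         ;# insert just after the cursor
    $dbc put -current middle2      ;# overwrite the datum under the cursor

    # Walk the set: expect first, middle2, last.
    for { set r [$dbc get -set k] } { [llength $r] > 0 } \
        { set r [$dbc get -nextdup] } {
        puts $r
    }
    $dbc close
    $db close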
diff --git a/storage/bdb/test/test088.tcl b/storage/bdb/test/test088.tcl
deleted file mode 100644
index 2d3ec739630..00000000000
--- a/storage/bdb/test/test088.tcl
+++ /dev/null
@@ -1,174 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test088.tcl,v 11.15 2004/05/13 18:51:44 mjc Exp $
-#
-# TEST test088
-# TEST Test of cursor stability across btree splits with very
-# TEST deep trees (a variant of test048). [#2514]
-proc test088 { method args } {
- source ./include.tcl
- global alphabet
- global errorCode
- global is_je_test
-
- set tstn 088
- set args [convert_args $method $args]
-
- if { [is_btree $method] != 1 } {
- puts "Test$tstn skipping for method $method."
- return
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test088: skipping for specific pagesizes"
- return
- }
-
- set method "-btree"
-
- puts "\tTest$tstn: Test of cursor stability across btree splits."
-
- set key "key$alphabet$alphabet$alphabet"
- set data "data$alphabet$alphabet$alphabet"
- set txn ""
- set flags ""
-
- puts "\tTest$tstn.a: Create $method database."
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tstn.db
- set env NULL
- } else {
- set testfile test$tstn.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- set ps 512
- set txn ""
- set oflags "-create -pagesize $ps -mode 0644 $args $method"
- set db [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set nkeys 5
- # Fill page w/ key/data pairs.
- #
- puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
- for { set i 0 } { $i < $nkeys } { incr i } {
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn {${key}00000$i $data$i}]
- error_check_good dbput $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- # get db ordering, set cursors
- puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
- # if mkeys is above 1000, need to adjust below for lexical order
- set mkeys 30000
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- set mkeys 300
- }
- for {set i 0; set ret [$db get ${key}00000$i]} {\
- $i < $nkeys && [llength $ret] != 0} {\
- incr i; set ret [$db get ${key}00000$i]} {
- set key_set($i) [lindex [lindex $ret 0] 0]
- set data_set($i) [lindex [lindex $ret 0] 1]
- set dbc [eval {$db cursor} $txn]
- set dbc_set($i) $dbc
- error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
- set ret [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc_set($i)_get:set [llength $ret] 0
- }
-
- puts "\tTest$tstn.d: Add $mkeys pairs to force splits."
- for {set i $nkeys} { $i < $mkeys } { incr i } {
- if { $i >= 10000 } {
- set ret [eval {$db put} $txn {${key}0$i $data$i}]
- } elseif { $i >= 1000 } {
- set ret [eval {$db put} $txn {${key}00$i $data$i}]
- } elseif { $i >= 100 } {
- set ret [eval {$db put} $txn {${key}000$i $data$i}]
- } elseif { $i >= 10 } {
- set ret [eval {$db put} $txn {${key}0000$i $data$i}]
- } else {
- set ret [eval {$db put} $txn {${key}00000$i $data$i}]
- }
- error_check_good dbput:more $ret 0
- }
-
- puts "\tTest$tstn.e: Make sure splits happened."
- # XXX cannot execute stat in presence of txns and cursors.
- if { $txnenv == 0 && !$is_je_test } {
- error_check_bad stat:check-split [is_substr [$db stat] \
- "{{Internal pages} 0}"] 1
- }
-
- puts "\tTest$tstn.f: Check to see that cursors maintained reference."
- for {set i 0} { $i < $nkeys } {incr i} {
- set ret [$dbc_set($i) get -current]
- error_check_bad dbc$i:get:current [llength $ret] 0
- set ret2 [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc$i:get:set [llength $ret2] 0
- error_check_good dbc$i:get(match) $ret $ret2
- }
-
- puts "\tTest$tstn.g: Delete added keys to force reverse splits."
- for {set i $nkeys} { $i < $mkeys } { incr i } {
- if { $i >= 10000 } {
- set ret [eval {$db del} $txn {${key}0$i}]
- } elseif { $i >= 1000 } {
- set ret [eval {$db del} $txn {${key}00$i}]
- } elseif { $i >= 100 } {
- set ret [eval {$db del} $txn {${key}000$i}]
- } elseif { $i >= 10 } {
- set ret [eval {$db del} $txn {${key}0000$i}]
- } else {
- set ret [eval {$db del} $txn {${key}00000$i}]
- }
- error_check_good dbdel:more $ret 0
- }
-
- puts "\tTest$tstn.h: Verify cursor reference."
- for {set i 0} { $i < $nkeys } {incr i} {
- set ret [$dbc_set($i) get -current]
- error_check_bad dbc$i:get:current [llength $ret] 0
- set ret2 [$dbc_set($i) get -set $key_set($i)]
- error_check_bad dbc$i:get:set [llength $ret2] 0
- error_check_good dbc$i:get(match) $ret $ret2
- }
-
- puts "\tTest$tstn.i: Cleanup."
- # close cursors
- for {set i 0} { $i < $nkeys } {incr i} {
- error_check_good dbc_close:$i [$dbc_set($i) close] 0
- }
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good dbclose [$db close] 0
-
- puts "\tTest$tstn complete."
-}
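The split check in step .e relies on $db stat reporting a nonzero internal-page count once the btree has more than one level. A small sketch of that probe (hypothetical file name; assumes the Tcl extension is loaded):

    set db [berkdb_open -create -mode 0644 -pagesize 512 -btree split.db]
    for { set i 0 } { $i < 2000 } { incr i } {
        $db put [format key%05d $i] somedata
    }
    # $db stat returns {name value} pairs; a multi-level btree shows a
    # nonzero "Internal pages" entry.
    foreach pair [$db stat] {
        if { [string match "*Internal pages*" $pair] } {
            puts $pair
        }
    }
    $db close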
diff --git a/storage/bdb/test/test089.tcl b/storage/bdb/test/test089.tcl
deleted file mode 100644
index 3eb2cd88d06..00000000000
--- a/storage/bdb/test/test089.tcl
+++ /dev/null
@@ -1,264 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test089.tcl,v 11.9 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test089
-# TEST Concurrent Data Store test (CDB)
-# TEST
-# TEST Enhanced CDB testing to test off-page dups, cursor dups and
-# TEST cursor operations like c_del then c_get.
-proc test089 { method {nentries 1000} args } {
- global datastr
- global encrypt
- source ./include.tcl
-
- #
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test089 skipping for env $env"
- return
- }
- set encargs ""
- set args [convert_args $method $args]
- set oargs [split_encargs $args encargs]
- set omethod [convert_method $method]
-
- puts "Test089: ($oargs) $method CDB Test cursor/dup operations"
-
- # Process arguments
- # Create the database and open the dictionary
- set testfile test089.db
- set testfile1 test089a.db
-
- env_cleanup $testdir
-
- set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- set db [eval {berkdb_open -env $env -create \
- -mode 0644 $omethod} $oargs {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set db1 [eval {berkdb_open -env $env -create \
- -mode 0644 $omethod} $oargs {$testfile1}]
- error_check_good dbopen [is_valid_db $db1] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put each key/data pair
- puts "\tTest089.a: Put loop"
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put:$db $ret 0
- set ret [eval {$db1 put} \
- $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put:$db1 $ret 0
- incr count
- }
- close $did
- error_check_good close:$db [$db close] 0
- error_check_good close:$db1 [$db1 close] 0
-
- # Database is created, now set up environment
-
- # Remove old mpools and Open/create the lock and mpool regions
- error_check_good env:close:$env [$env close] 0
- set ret [eval {berkdb envremove} $encargs -home $testdir]
- error_check_good env_remove $ret 0
-
- set env [eval {berkdb_env_noerr -create -cdb} $encargs -home $testdir]
- error_check_good dbenv [is_valid_widget $env env] TRUE
-
- puts "\tTest089.b: CDB cursor dups"
-
- set db1 [eval {berkdb_open_noerr -env $env -create \
- -mode 0644 $omethod} $oargs {$testfile1}]
- error_check_good dbopen [is_valid_db $db1] TRUE
-
- # Create a read-only cursor and make sure we can't write with it.
- set dbcr [$db1 cursor]
- error_check_good dbcursor [is_valid_cursor $dbcr $db1] TRUE
- set ret [$dbcr get -first]
- catch { $dbcr put -current data } ret
- error_check_good is_read_only \
- [is_substr $ret "Write attempted on read-only cursor"] 1
- error_check_good dbcr_close [$dbcr close] 0
-
- # Create a write cursor and duplicate it.
- set dbcw [$db1 cursor -update]
- error_check_good dbcursor [is_valid_cursor $dbcw $db1] TRUE
- set dup_dbcw [$dbcw dup]
- error_check_good dup_write_cursor [is_valid_cursor $dup_dbcw $db1] TRUE
-
- # Position both cursors at get -first. They should find the same data.
- set get_first [$dbcw get -first]
- set get_first_dup [$dup_dbcw get -first]
- error_check_good dup_read $get_first $get_first_dup
-
- # Test that the write cursors can both write and that they
- # read each other's writes correctly. First write reversed
- # datastr with original cursor and read with dup cursor.
- error_check_good put_current_orig \
- [$dbcw put -current [chop_data $method [reverse $datastr]]] 0
- set reversed [$dup_dbcw get -current]
- error_check_good check_with_dup [lindex [lindex $reversed 0] 1] \
- [chop_data $method [reverse $datastr]]
-
- # Write forward datastr with dup cursor and read with original.
- error_check_good put_current_dup \
- [$dup_dbcw put -current [chop_data $method $datastr]] 0
- set forward [$dbcw get -current]
- error_check_good check_with_orig $forward $get_first
-
- error_check_good dbcw_close [$dbcw close] 0
- error_check_good dup_dbcw_close [$dup_dbcw close] 0
-
- # This tests the failure found in #1923
- puts "\tTest089.c: Test delete then get"
-
- set dbc [$db1 cursor -update]
- error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
-
- for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
- {set kd [$dbc get -next] } {
- error_check_good dbcdel [$dbc del] 0
- }
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db1 close] 0
- error_check_good env_close [$env close] 0
-
- if { [is_btree $method] != 1 } {
- puts "Skipping rest of test089 for $method method."
- return
- }
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Skipping rest of test089 for specific pagesizes"
- return
- }
- append oargs " -dup "
- test089_dup $testdir $encargs $oargs $omethod $nentries
- append oargs " -dupsort "
- test089_dup $testdir $encargs $oargs $omethod $nentries
-}
-
-proc test089_dup { testdir encargs oargs method nentries } {
- env_cleanup $testdir
- set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- #
- # Set pagesize small to generate lots of off-page dups
- #
- set page 512
- set nkeys 5
- set data "data"
- set key "test089_key"
- set testfile test089.db
- puts "\tTest089.d: CDB ($oargs) off-page dups"
- set oflags "-env $env -create -mode 0644 $oargs $method"
- set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- puts "\tTest089.e: Fill page with $nkeys keys, with $nentries dups"
- for { set k 0 } { $k < $nkeys } { incr k } {
- for { set i 0 } { $i < $nentries } { incr i } {
- set ret [$db put $key$k $i$data$k]
- error_check_good dbput $ret 0
- }
- }
-
- # Verify we have off-page duplicates
- set stat [$db stat]
- error_check_bad stat:offpage [is_substr $stat "{{Internal pages} 0}"] 1
-
- # This tests the failure reported in #6950. Skip for -dupsort.
- puts "\tTest089.f: Clear locks for duped off-page dup cursors."
- if { [is_substr $oargs dupsort] != 1 } {
- # Create a read cursor, put it on an off-page dup.
- set dbcr [$db cursor]
- error_check_good dbcr [is_valid_cursor $dbcr $db] TRUE
- set offpage [$dbcr get -get_both test089_key4 900data4]
- error_check_bad offpage [llength $offpage] 0
-
- # Create a write cursor, put it on an off-page dup.
- set dbcw [$db cursor -update]
- error_check_good dbcw [is_valid_cursor $dbcw $db] TRUE
- set offpage [$dbcw get -get_both test089_key3 900data3]
- error_check_bad offpage [llength $offpage] 0
-
- # Add a new item using the write cursor, then close the cursor.
- error_check_good add_dup [$dbcw put -after $data] 0
- error_check_good close_dbcw [$dbcw close] 0
-
- # Get next dup with read cursor, then close the cursor.
- set nextdup [$dbcr get -nextdup]
- error_check_good close_dbcr [$dbcr close] 0
- }
-
- puts "\tTest089.g: CDB duplicate write cursors with off-page dups"
- # Create a write cursor and duplicate it.
- set dbcw [$db cursor -update]
- error_check_good dbcursor [is_valid_cursor $dbcw $db] TRUE
- set dup_dbcw [$dbcw dup]
- error_check_good dup_write_cursor [is_valid_cursor $dup_dbcw $db] TRUE
-
- # Position both cursors at get -first. They should find the same data.
- set get_first [$dbcw get -first]
- set get_first_dup [$dup_dbcw get -first]
- error_check_good dup_read $get_first $get_first_dup
-
- # Test with -after and -before. Skip for -dupsort.
- if { [is_substr $oargs dupsort] != 1 } {
- # Original and duplicate cursors both point to first item.
- # Do a put -before of new string with original cursor,
- # and a put -after of new string with duplicate cursor.
- set newdata "newdata"
- error_check_good put_before [$dbcw put -before $newdata] 0
- error_check_good put_after [$dup_dbcw put -after $newdata] 0
-
- # Now walk forward with original cursor ...
- set first [$dbcw get -first]
- error_check_good check_first [lindex [lindex $first 0] 1] $newdata
- set next1 [$dbcw get -next]
- error_check_good check_next1 $next1 $get_first
- set next2 [$dbcw get -next]
- error_check_good check_next2 [lindex [lindex $next2 0] 1] $newdata
-
- # ... and backward with duplicate cursor.
- set current [$dup_dbcw get -current]
- error_check_good check_current [lindex [lindex $current 0] 1] $newdata
- set prev1 [$dup_dbcw get -prev]
- error_check_good check_prev1 $prev1 $get_first
- set prev2 [$dup_dbcw get -prev]
- error_check_good check_prev2 [lindex [lindex $prev2 0] 1] $newdata
- }
-
- puts "\tTest089.h: test delete then get of off-page dups"
- for {set kd [$dbcw get -first] } { [llength $kd] != 0 } \
- {set kd [$dbcw get -next] } {
- error_check_good dbcdel [$dbcw del] 0
- }
-
- error_check_good dbcw_close [$dbcw close] 0
- error_check_good dup_dbcw_close [$dup_dbcw close] 0
-
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
-}
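Under CDB, a plain cursor is read-only and writes require a cursor opened with -update, which is what Test089.b verifies. A minimal sketch (made-up names; assumes the Tcl extension is loaded and an empty ./scratch):

    set env [berkdb_env -create -cdb -home scratch]
    set db [berkdb_open -env $env -create -mode 0644 -btree cdb.db]
    $db put k v

    set rc [$db cursor]                    ;# read-only under CDB
    $rc get -first
    puts [catch { $rc put -current v2 }]   ;# 1 -- write refused
    $rc close

    set wc [$db cursor -update]            ;# write cursor
    $wc get -first
    $wc put -current v2                    ;# succeeds
    $wc close
    $db close
    $env close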
diff --git a/storage/bdb/test/test090.tcl b/storage/bdb/test/test090.tcl
deleted file mode 100644
index 7c46c56c0a7..00000000000
--- a/storage/bdb/test/test090.tcl
+++ /dev/null
@@ -1,16 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test090.tcl,v 11.15 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test090
-# TEST Test for functionality near the end of the queue using test001.
-proc test090 { method {nentries 10000} {tnum "090"} args} {
- if { [is_queueext $method ] == 0 } {
- puts "Skipping test$tnum for $method."
- return;
- }
- eval {test001 $method $nentries 4294967000 0 $tnum} $args
-}
diff --git a/storage/bdb/test/test091.tcl b/storage/bdb/test/test091.tcl
deleted file mode 100644
index 81cabb867b9..00000000000
--- a/storage/bdb/test/test091.tcl
+++ /dev/null
@@ -1,20 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test091.tcl,v 11.10 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test091
-# TEST Test of DB_CONSUME_WAIT.
-proc test091 { method {nconsumers 4} \
- {nproducers 2} {nitems 1000} {start 0 } {tnum "091"} args} {
- if { [is_queue $method ] == 0 } {
-		puts "Skipping test$tnum for $method."
- return;
- }
- eval {test070 $method \
- $nconsumers $nproducers $nitems WAIT $start -txn $tnum } $args
- eval {test070 $method \
- $nconsumers $nproducers $nitems WAIT $start -cdb $tnum } $args
-}
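-
-# Sketch of the consume-wait pattern test070 drives (illustrative;
-# assumes $db is an open queue database): the consumer blocks in
-# get -consume_wait until a producer enqueues a record, which is
-# then returned and deleted atomically.
-proc test091_consume_sketch { db } {
-	# The result has the form {{recno data}}.
-	set ret [$db get -consume_wait]
-	return [lindex [lindex $ret 0] 1]
-}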
diff --git a/storage/bdb/test/test092.tcl b/storage/bdb/test/test092.tcl
deleted file mode 100644
index ef4c822d821..00000000000
--- a/storage/bdb/test/test092.tcl
+++ /dev/null
@@ -1,247 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test092.tcl,v 11.18 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test092
-# TEST Test of DB_DIRTY_READ [#3395]
-# TEST
-# TEST We set up a database with nentries in it. We then open the
-# TEST database read-only twice. One with dirty read and one without.
-# TEST We open the database for writing and update some entries in it.
-# TEST Then read those new entries via db->get (clean and dirty), and
-# TEST via cursors (clean and dirty).
-proc test092 { method {nentries 1000} args } {
- source ./include.tcl
- #
- # If we are using an env, then skip this test. It needs its own.
- set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- puts "Test092 skipping for env $env"
- return
- }
- set args [convert_args $method $args]
- set encargs ""
- set args [split_encargs $args encargs]
- set omethod [convert_method $method]
-
- puts "Test092: Dirty Read Test $method $nentries"
-
- # Create the database and open the dictionary
- set testfile test092.db
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
-
- env_cleanup $testdir
-
- set lmax [expr $nentries * 2]
- set lomax [expr $nentries * 2]
- set env [eval {berkdb_env -create -txn} $encargs -home $testdir \
- -lock_max_locks $lmax -lock_max_objects $lomax]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- set db [eval {berkdb_open -env $env -create \
- -mode 0644 $omethod} $args {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Here is the loop where we put each key/data pair.
- # Key is entry, data is entry also.
- puts "\tTest092.a: put loop"
- set count 0
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- }
- set ret [eval {$db put} {$key [chop_data $method $str]}]
- error_check_good put:$db $ret 0
- incr count
- }
- close $did
- error_check_good close:$db [$db close] 0
-
- puts "\tTest092.b: Opening all the handles"
- #
- # Open all of our handles.
- # We need:
- # 1. Our main txn (t).
- # 2. A txn that can read dirty data (tdr).
- # 3. A db handle for writing via txn (dbtxn).
- # 4. A db handle for clean data (dbcl).
- # 5. A db handle for dirty data (dbdr).
- # 6. A cursor handle for dirty txn data (clean db handle using
- # the dirty txn handle on the cursor call) (dbccl1).
- # 7. A cursor handle for dirty data (dirty on get call) (dbcdr0).
- # 8. A cursor handle for dirty data (dirty on cursor call) (dbcdr1).
- set t [$env txn]
- error_check_good txnbegin [is_valid_txn $t $env] TRUE
-
- set tdr [$env txn -dirty]
- error_check_good txnbegin:dr [is_valid_txn $tdr $env] TRUE
- set dbtxn [eval {berkdb_open -auto_commit -env $env -dirty \
- -mode 0644 $omethod} {$testfile}]
- error_check_good dbopen:dbtxn [is_valid_db $dbtxn] TRUE
-
- set dbcl [eval {berkdb_open -auto_commit -env $env \
- -rdonly -mode 0644 $omethod} {$testfile}]
- error_check_good dbopen:dbcl [is_valid_db $dbcl] TRUE
-
- set dbdr [eval {berkdb_open -auto_commit -env $env -dirty \
- -rdonly -mode 0644 $omethod} {$testfile}]
- error_check_good dbopen:dbdr [is_valid_db $dbdr] TRUE
-
- set dbccl [$dbcl cursor -txn $tdr]
- error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
-
- set dbcdr0 [$dbdr cursor]
- error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
-
- set dbcdr1 [$dbdr cursor -dirty]
- error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
-
- # Test that $db stat can use -dirty flag.
- puts "\tTest092.c: Smoke test for db_stat -txn -dirty"
- if { [catch {set statret [$dbcl stat -txn $t -dirty]} res] } {
- puts "FAIL: db_stat -txn -dirty returned $res"
- }
-
- #
- # Now that we have all of our handles, change all the data in there
- # to be the key and data the same, but data is capitalized.
- puts "\tTest092.d: put/get data within a txn"
- set gflags ""
- if { [is_record_based $method] == 1 } {
- set checkfunc test092dr_recno.check
- append gflags " -recno"
- } else {
- set checkfunc test092dr.check
- }
- set count 0
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- set ustr [string toupper $str]
- set clret [list [list $key [pad_data $method $str]]]
- set drret [list [list $key [pad_data $method $ustr]]]
- #
- # Put the data in the txn.
- #
- set ret [eval {$dbtxn put} -txn $t \
- {$key [chop_data $method $ustr]}]
- error_check_good put:$dbtxn $ret 0
-
- #
- # Now get the data using the different db handles and
- # make sure it is dirty or clean data.
- #
- # Using the dirty txn should show us dirty data
- set ret [eval {$dbcl get -txn $tdr} $gflags {$key}]
-		error_check_good dbcl:get $ret $drret
-
- set ret [eval {$dbdr get -dirty} $gflags {$key}]
- error_check_good dbdr1:get $ret $drret
-
- set ret [eval {$dbdr get -txn $tdr} $gflags {$key}]
- error_check_good dbdr2:get $ret $drret
-
- incr count
- }
- close $did
-
- puts "\tTest092.e: Check dirty data using dirty txn and clean db/cursor"
- dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
-
- puts "\tTest092.f: Check dirty data using -dirty cget flag"
- dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
-
- puts "\tTest092.g: Check dirty data using -dirty cursor"
- dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
-
- #
- # We must close these before aborting the real txn
- # because they all hold read locks on the pages.
- #
- error_check_good dbccl:close [$dbccl close] 0
- error_check_good dbcdr0:close [$dbcdr0 close] 0
- error_check_good dbcdr1:close [$dbcdr1 close] 0
-
- #
- # Now abort the modifying transaction and rerun the data checks.
- #
- puts "\tTest092.h: Aborting the write-txn"
- error_check_good txnabort [$t abort] 0
-
- set dbccl [$dbcl cursor -txn $tdr]
- error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
-
- set dbcdr0 [$dbdr cursor]
- error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
-
- set dbcdr1 [$dbdr cursor -dirty]
- error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
-
- if { [is_record_based $method] == 1 } {
- set checkfunc test092cl_recno.check
- } else {
- set checkfunc test092cl.check
- }
-	puts "\tTest092.i: Check clean data using dirty txn and clean db/cursor"
- dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
-
- puts "\tTest092.j: Check clean data using -dirty cget flag"
- dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
-
- puts "\tTest092.k: Check clean data using -dirty cursor"
- dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
-
- # Clean up our handles
- error_check_good dbccl:close [$dbccl close] 0
- error_check_good tdrcommit [$tdr commit] 0
- error_check_good dbcdr0:close [$dbcdr0 close] 0
- error_check_good dbcdr1:close [$dbcdr1 close] 0
- error_check_good dbclose [$dbcl close] 0
- error_check_good dbclose [$dbdr close] 0
- error_check_good dbclose [$dbtxn close] 0
- error_check_good envclose [$env close] 0
-}
-
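-# Sketch of the three ways of requesting dirty reads exercised above
-# (illustrative; assumes a txn env $env and an existing $file):
-proc test092_dirty_sketch { env file key } {
-	# A handle opened -dirty allows dirty gets and dirty cursors.
-	set dbdr [berkdb_open -auto_commit -env $env -dirty -rdonly $file]
-	set dbc [$dbdr cursor -dirty]
-	set r1 [$dbdr get -dirty $key]
-	# A clean handle can still read dirty data via a -dirty txn.
-	set dbcl [berkdb_open -auto_commit -env $env -rdonly $file]
-	set t [$env txn -dirty]
-	set r2 [$dbcl get -txn $t $key]
-	error_check_good sketch_commit [$t commit] 0
-	error_check_good sketch_cclose [$dbc close] 0
-	error_check_good sketch_drclose [$dbdr close] 0
-	error_check_good sketch_clclose [$dbcl close] 0
-	return [list $r1 $r2]
-}
-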
-# Check functions for test092.
-# Clean checks mean keys and data are identical.
-# Dirty checks mean data are uppercase versions of keys.
-proc test092cl.check { key data } {
- error_check_good "key/data mismatch" $key $data
-}
-
-proc test092cl_recno.check { key data } {
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
-}
-
-proc test092dr.check { key data } {
- error_check_good "key/data mismatch" $key [string tolower $data]
-}
-
-proc test092dr_recno.check { key data } {
- global kvals
-
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data \
- [string toupper $kvals($key)]
-}
-
diff --git a/storage/bdb/test/test093.tcl b/storage/bdb/test/test093.tcl
deleted file mode 100644
index 3ed4b596403..00000000000
--- a/storage/bdb/test/test093.tcl
+++ /dev/null
@@ -1,393 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test093.tcl,v 11.23 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test093
-# TEST Test using set_bt_compare.
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and data; retrieve each.
-# TEST After all are entered, retrieve all; compare output to original.
-# TEST Close file, reopen, do retrieve and re-verify.
-proc test093 { method {nentries 10000} {tnum "093"} args} {
- source ./include.tcl
- global btvals
- global btvalsck
- global errorInfo
-
- set dbargs [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_btree $method] != 1 } {
- puts "Test$tnum: skipping for method $method."
- return
- }
- set txnenv 0
- set eindex [lsearch -exact $dbargs "-env"]
- if { $eindex != -1 } {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $dbargs $eindex]
- set rpcenv [is_rpcenv $env]
- if { $rpcenv == 1 } {
- puts "Test$tnum: skipping for RPC"
- return
- }
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append dbargs " -auto_commit "
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- cleanup $testdir $env
- }
- puts "Test$tnum: $method ($args) $nentries using btcompare"
-
-
- test093_run $omethod $dbargs $nentries $tnum test093_cmp1 test093_sort1
- test093_runbig $omethod $dbargs $nentries $tnum \
- test093_cmp1 test093_sort1
- test093_run $omethod $dbargs $nentries $tnum test093_cmp2 test093_sort2
- #
- # Don't bother running the second, really slow, comparison
- # function on test093_runbig (file contents).
-
- # Clean up so verification doesn't fail. (There's currently
- # no way to specify a comparison function to berkdb dbverify.)
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- set eindex [lsearch -exact $dbargs "-env"]
- if { $eindex == -1 } {
- set env NULL
- } else {
- incr eindex
- set env [lindex $dbargs $eindex]
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-}
-
-proc test093_run { method dbargs nentries tnum cmpfunc sortfunc } {
- source ./include.tcl
- global btvals
- global btvalsck
-
- # Create the database and open the dictionary
- set eindex [lsearch -exact $dbargs "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- set txnenv 0
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $dbargs $eindex]
- set txnenv [is_txnenv $env]
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set db [eval {berkdb_open -btcompare $cmpfunc \
- -create -mode 0644} $method $dbargs $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set pflags ""
- set gflags ""
- set txn ""
- set btvals {}
- set btvalsck {}
- set checkfunc test093_check
- puts "\tTest$tnum.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- set key $str
- set str [reverse $str]
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval \
- {$db put} $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- lappend btvals $key
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
-
- incr count
- }
- close $did
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary (or ints)
- set q q
- filehead $nentries $dict $t2
- filesort $t2 $t3
- file rename -force $t3 $t2
- filesort $t1 $t3
-
- error_check_good Test$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tTest$tnum.c: dump file in order"
- # Now, reopen the file and run the last test again.
- # We open it here, ourselves, because all uses of the db
- # need to have the correct comparison func set. Then
- # call dump_file_direction directly.
- set btvalsck {}
- set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
- $dbargs $method $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- #
- # We need to sort btvals according to the comparison function.
- # Once that is done, btvalsck and btvals should be the same.
- puts "\tTest$tnum.d: check file order"
-
- $sortfunc
-
- error_check_good btvals:len [llength $btvals] [llength $btvalsck]
- for {set i 0} {$i < $nentries} {incr i} {
- error_check_good vals:$i [lindex $btvals $i] \
- [lindex $btvalsck $i]
- }
-}
-
-proc test093_runbig { method dbargs nentries tnum cmpfunc sortfunc } {
- source ./include.tcl
- global btvals
- global btvalsck
-
- # Create the database and open the dictionary
- set eindex [lsearch -exact $dbargs "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- set txnenv 0
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum.db
- set env NULL
- } else {
- set testfile test$tnum.db
- incr eindex
- set env [lindex $dbargs $eindex]
- set txnenv [is_txnenv $env]
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- set db [eval {berkdb_open -btcompare $cmpfunc \
- -create -mode 0644} $method $dbargs $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
- set t5 $testdir/t5
- set pflags ""
- set gflags ""
- set txn ""
- set btvals {}
- set btvalsck {}
- set checkfunc test093_checkbig
- puts "\tTest$tnum.e:\
- big key put/get loop key=filecontents data=filename"
-
- # Here is the loop where we put and get each key/data pair
- set file_list [get_file_list 1]
-
- set count 0
- foreach f $file_list {
- set fid [open $f r]
- fconfigure $fid -translation binary
- set key [read $fid]
- close $fid
-
- set key $f$key
-
- set fcopy [open $t5 w]
- fconfigure $fcopy -translation binary
- puts -nonewline $fcopy $key
- close $fcopy
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} $txn $pflags {$key \
- [chop_data $method $f]}]
- error_check_good put_file $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- lappend btvals $key
-
- # Should really catch errors
- set fid [open $t4 w]
- fconfigure $fid -translation binary
- if [catch {eval {$db get} $gflags {$key}} data] {
- puts -nonewline $fid $data
- } else {
- # Data looks like {{key data}}
- set key [lindex [lindex $data 0] 0]
- puts -nonewline $fid $key
- }
- close $fid
- error_check_good \
-		    Test$tnum:diff($t5,$t4) [filecmp $t5 $t4] 0
-
- incr count
- }
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.f: big dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest$tnum.g: dump file in order"
- # Now, reopen the file and run the last test again.
- # We open it here, ourselves, because all uses of the db
- # need to have the correct comparison func set. Then
- # call dump_file_direction directly.
-
- set btvalsck {}
- set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
- $dbargs $method $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- #
- # We need to sort btvals according to the comparison function.
- # Once that is done, btvalsck and btvals should be the same.
- puts "\tTest$tnum.h: check file order"
-
- $sortfunc
- error_check_good btvals:len [llength $btvals] [llength $btvalsck]
-
- set end [llength $btvals]
- for {set i 0} {$i < $end} {incr i} {
- error_check_good vals:$i [lindex $btvals $i] \
- [lindex $btvalsck $i]
- }
-}
-
-# Simple bt comparison.
-proc test093_cmp1 { a b } {
- return [string compare $b $a]
-}
-
-# Simple bt sorting.
-proc test093_sort1 {} {
- global btvals
- #
- # This one is easy, just sort in reverse.
- #
- set btvals [lsort -decreasing $btvals]
-}
-
-proc test093_cmp2 { a b } {
- set arev [reverse $a]
- set brev [reverse $b]
- return [string compare $arev $brev]
-}
-
-proc test093_sort2 {} {
- global btvals
-
-	# We have to reverse them, then sort them.
- # Then reverse them back to real words.
- set rbtvals {}
- foreach i $btvals {
- lappend rbtvals [reverse $i]
- }
- set rbtvals [lsort -increasing $rbtvals]
- set newbtvals {}
- foreach i $rbtvals {
- lappend newbtvals [reverse $i]
- }
- set btvals $newbtvals
-}
-
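-# The harness supplies [reverse]; a minimal equivalent for reference
-# (sketch only):
-proc test093_reverse_sketch { s } {
-	set r ""
-	for { set i [expr [string length $s] - 1] } { $i >= 0 } \
-	    { incr i -1 } {
-		append r [string index $s $i]
-	}
-	return $r
-}
-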
-# Check function for test093; data is the reverse of the key.
-proc test093_check { key data } {
- global btvalsck
-
- error_check_good "key/data mismatch" $data [reverse $key]
- lappend btvalsck $key
-}
-
-# Check function for test093 big keys: the data is the file name and
-# the key is the file name followed by the file contents.
-proc test093_checkbig { key data } {
- source ./include.tcl
- global btvalsck
-
- set fid [open $data r]
- fconfigure $fid -translation binary
- set cont [read $fid]
- close $fid
- error_check_good "key/data mismatch" $key $data$cont
- lappend btvalsck $key
-}
-
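-# Sketch of the set_bt_compare contract tested above: the comparison
-# proc receives two keys and must return a negative, zero, or positive
-# integer, exactly like [string compare]. For example, a btree keyed
-# by integer values (illustrative):
-proc test093_numeric_cmp { a b } {
-	if { $a < $b } {
-		return -1
-	} elseif { $a > $b } {
-		return 1
-	}
-	return 0
-}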
diff --git a/storage/bdb/test/test094.tcl b/storage/bdb/test/test094.tcl
deleted file mode 100644
index 20f2b3af3ab..00000000000
--- a/storage/bdb/test/test094.tcl
+++ /dev/null
@@ -1,189 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test094.tcl,v 11.21 2004/06/29 14:26:17 carol Exp $
-#
-# TEST test094
-# TEST Test using set_dup_compare.
-# TEST
-# TEST Use the first 10,000 entries from the dictionary.
-# TEST Insert each with self as key and data; retrieve each.
-# TEST After all are entered, retrieve all; compare output to original.
-# TEST Close file, reopen, do retrieve and re-verify.
-proc test094 { method {nentries 10000} {ndups 10} {tnum "094"} args} {
- source ./include.tcl
- global errorInfo
-
- set dbargs [convert_args $method $args]
- set omethod [convert_method $method]
-
- if { [is_btree $method] != 1 && [is_hash $method] != 1 } {
- puts "Test$tnum: skipping for method $method."
- return
- }
-
- set txnenv 0
- set eindex [lsearch -exact $dbargs "-env"]
- # Create the database and open the dictionary
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum-a.db
- set env NULL
- } else {
- set testfile test$tnum-a.db
- incr eindex
- set env [lindex $dbargs $eindex]
- set rpcenv [is_rpcenv $env]
- if { $rpcenv == 1 } {
- puts "Test$tnum: skipping for RPC"
- return
- }
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append dbargs " -auto_commit "
- if { $nentries == 10000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- }
- set testdir [get_home $env]
- }
- puts "Test$tnum: $method ($args) $nentries \
- with $ndups dups using dupcompare"
-
- cleanup $testdir $env
-
- set db [eval {berkdb_open -dupcompare test094_cmp \
- -dup -dupsort -create -mode 0644} $omethod $dbargs {$testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
- set t1 $testdir/t1
- set pflags ""
- set gflags ""
- set txn ""
- puts "\tTest$tnum.a: $nentries put/get duplicates loop"
- # Here is the loop where we put and get each key/data pair
- set count 0
- set dlist {}
- for {set i 0} {$i < $ndups} {incr i} {
- set dlist [linsert $dlist 0 $i]
- }
- while { [gets $did str] != -1 && $count < $nentries } {
- set key $str
- for {set i 0} {$i < $ndups} {incr i} {
- set data $i:$str
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $omethod $data]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get [llength $ret] $ndups
- incr count
- }
- close $did
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest$tnum.b: traverse checking duplicates before close"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Set up second testfile so truncate flag is not needed.
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test$tnum-b.db
- set env NULL
- } else {
- set testfile test$tnum-b.db
- set env [lindex $dbargs $eindex]
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- #
- # Test dupcompare with data items big enough to force offpage dups.
- #
- puts "\tTest$tnum.c: big key put/get dup loop key=filename data=filecontents"
- set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort \
- -create -mode 0644} $omethod $dbargs $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Here is the loop where we put and get each key/data pair
- set file_list [get_file_list 1]
- if { [llength $file_list] > $nentries } {
- set file_list [lrange $file_list 1 $nentries]
- }
-
- set count 0
- foreach f $file_list {
- set fid [open $f r]
- fconfigure $fid -translation binary
- set cont [read $fid]
- close $fid
-
- set key $f
- for {set i 0} {$i < $ndups} {incr i} {
- set data $i:$cont
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $omethod $data]}]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- }
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get [llength $ret] $ndups
- incr count
- }
-
- puts "\tTest$tnum.d: traverse checking duplicates before close"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dup_file_check $db $txn $t1 $dlist
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- set testdir [get_home $env]
- }
- error_check_good db_close [$db close] 0
-
- # Clean up the test directory, since there's currently
- # no way to specify a dup_compare function to berkdb dbverify
- # and without one it will fail.
- cleanup $testdir $env
-}
-
-# Simple dup comparison.
-proc test094_cmp { a b } {
- return [string compare $b $a]
-}
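-
-# Sketch: -dupcompare orders the data items within each duplicate
-# set and requires -dup -dupsort, as above. An open call with the
-# custom order looks like this (illustrative file name):
-#	set db [berkdb_open -dupcompare test094_cmp -dup -dupsort \
-#	    -create -btree dups.db]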
diff --git a/storage/bdb/test/test095.tcl b/storage/bdb/test/test095.tcl
deleted file mode 100644
index 9c62a6a51ef..00000000000
--- a/storage/bdb/test/test095.tcl
+++ /dev/null
@@ -1,367 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test095.tcl,v 11.30 2004/09/22 18:01:06 bostic Exp $
-#
-# TEST test095
-# TEST Bulk get test for methods supporting dups. [#2934]
-proc test095 { method {tnum "095"} args } {
- source ./include.tcl
- global is_je_test
- global is_qnx_test
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set basename $testdir/test$tnum
- set env NULL
-		# If we have our own env, there's no reason to
-		# swap--this isn't an mpool test.
- set carg { -cachesize {0 25000000 0} }
- } else {
- set basename test$tnum
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- puts "Skipping for environment with txns"
- return
- }
- set testdir [get_home $env]
- set carg {}
- }
- cleanup $testdir $env
-
- puts "Test$tnum: $method ($args) Bulk get test"
-
- # Tcl leaves a lot of memory allocated after this test
- # is run in the tclsh. This ends up being a problem on
- # QNX runs as later tests then run out of memory.
- if { $is_qnx_test } {
- puts "Test$tnum skipping for QNX"
- return
- }
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Test$tnum skipping for method $method"
- return
- }
-
- # The test's success is dependent on the relationship between
- # the amount of data loaded and the buffer sizes we pick, so
- # these parameters don't belong on the command line.
- set nsets 300
- set noverflows 25
-
- # We run the meat of the test twice: once with unsorted dups,
- # once with sorted dups.
- foreach { dflag sort } { -dup unsorted {-dup -dupsort} sorted } {
- if { $is_je_test && $sort == "unsorted" } {
- continue
- }
-
- set testfile $basename-$sort.db
- set did [open $dict]
-
- # Open and populate the database with $nsets sets of dups.
- # Each set contains as many dups as its number
- puts "\tTest$tnum.a:\
- Creating database with $nsets sets of $sort dups."
- set dargs "$dflag $carg $args"
- set db [eval {berkdb_open_noerr -create} \
- $omethod $dargs $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
- t95_populate $db $did $nsets 0
-
- # Determine the pagesize so we can use it to size the buffer.
- set stat [$db stat]
- set pagesize [get_pagesize $stat]
-
- # Run basic get tests.
- #
- # A small buffer will fail if it is smaller than the pagesize.
- # Skip small buffer tests if the page size is so small that
- # we can't define a buffer smaller than the page size.
- # (Buffers must be 1024 or multiples of 1024.)
- #
- # A big buffer of 66560 (64K + 1K) should always be large
- # enough to contain the data, so the test should succeed
- # on all platforms. We picked this number because it
- # is larger than the largest allowed pagesize, so the test
- # always fills more than a page at some point.
-
- set maxpage [expr 1024 * 64]
- set bigbuf [expr $maxpage + 1024]
- set smallbuf 1024
-
- if { $pagesize > 1024 } {
- t95_gettest $db $tnum b $smallbuf 1
- } else {
- puts "Skipping small buffer test Test$tnum.b"
- }
- t95_gettest $db $tnum c $bigbuf 0
-
- # Run cursor get tests.
- if { $pagesize > 1024 } {
-			t95_cgettest $db $tnum d $smallbuf 1
- } else {
- puts "Skipping small buffer test Test$tnum.d"
- }
- t95_cgettest $db $tnum e $bigbuf 0
-
- # Run invalid flag combination tests
- # Sync and reopen test file so errors won't be sent to stderr
- error_check_good db_sync [$db sync] 0
- set noerrdb [eval berkdb_open_noerr $dargs $testfile]
- t95_flagtest $noerrdb $tnum f [expr 8192]
- t95_cflagtest $noerrdb $tnum g [expr 100]
- error_check_good noerrdb_close [$noerrdb close] 0
-
- # Set up for overflow tests
- set max [expr 4096 * $noverflows]
- puts "\tTest$tnum.h: Add $noverflows overflow sets\
- to database (max item size $max)"
- t95_populate $db $did $noverflows 4096
-
- # Run overflow get tests. The overflow test fails with
- # our standard big buffer doubled, but succeeds with a
- # buffer sized to handle $noverflows pairs of data of
- # size $max.
- t95_gettest $db $tnum i $bigbuf 1
- t95_gettest $db $tnum j [expr $bigbuf * 2] 1
- t95_gettest $db $tnum k [expr $max * $noverflows * 2] 0
-
- # Run overflow cursor get tests.
- t95_cgettest $db $tnum l $bigbuf 1
- # Expand buffer to accommodate basekey as well as the padding.
- t95_cgettest $db $tnum m [expr ($max + 512) * 2] 0
-
- error_check_good db_close [$db close] 0
- close $did
- }
-}
-
-proc t95_gettest { db tnum letter bufsize expectfail } {
- t95_gettest_body $db $tnum $letter $bufsize $expectfail 0
-}
-proc t95_cgettest { db tnum letter bufsize expectfail } {
- t95_gettest_body $db $tnum $letter $bufsize $expectfail 1
-}
-proc t95_flagtest { db tnum letter bufsize } {
- t95_flagtest_body $db $tnum $letter $bufsize 0
-}
-proc t95_cflagtest { db tnum letter bufsize } {
- t95_flagtest_body $db $tnum $letter $bufsize 1
-}
-
-# Basic get test
-proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } {
- global errorCode
-
- foreach flag { multi multi_key } {
- if { $usecursor == 0 } {
- if { $flag == "multi_key" } {
- # db->get does not allow multi_key
- continue
- } else {
- set action "db get -$flag"
- }
- } else {
- set action "dbc get -$flag -set/-next"
- }
- puts "\tTest$tnum.$letter: $action with bufsize $bufsize"
- set allpassed TRUE
- set saved_err ""
-
- # Cursor for $usecursor.
- if { $usecursor != 0 } {
- set getcurs [$db cursor]
- error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
- }
-
- # Traverse DB with cursor; do get/c_get($flag) on each item.
- set dbc [$db cursor]
- error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
- for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
- { set dbt [$dbc get -nextnodup] } {
- set key [lindex [lindex $dbt 0] 0]
- set datum [lindex [lindex $dbt 0] 1]
-
- if { $usecursor == 0 } {
- set ret [catch {eval $db get -$flag $bufsize $key} res]
- } else {
- set res {}
- for { set ret [catch {eval $getcurs get -$flag $bufsize\
- -set $key} tres] } \
- { $ret == 0 && [llength $tres] != 0 } \
- { set ret [catch {eval $getcurs get -$flag $bufsize\
- -nextdup} tres]} {
- eval lappend res $tres
- }
- }
-
- # If we expect a failure, be more tolerant if the above
- # fails; just make sure it's a DB_BUFFER_SMALL or an
- # EINVAL (if the buffer is smaller than the pagesize,
- # it's EINVAL), mark it, and move along.
- if { $expectfail != 0 && $ret != 0 } {
- if { [is_substr $errorCode DB_BUFFER_SMALL] != 1 && \
- [is_substr $errorCode EINVAL] != 1 } {
- error_check_good \
- "$flag failure errcode" \
- $errorCode "DB_BUFFER_SMALL or EINVAL"
- }
- set allpassed FALSE
- continue
- }
- error_check_good "get_$flag ($key)" $ret 0
- if { $flag == "multi_key" } {
- t95_verify $res TRUE
- } else {
- t95_verify $res FALSE
- }
- }
- set ret [catch {eval $db get -$flag $bufsize} res]
-
- if { $expectfail == 1 } {
- error_check_good allpassed $allpassed FALSE
- puts "\t\tTest$tnum.$letter:\
- returned at least one DB_BUFFER_SMALL (as expected)"
- } else {
- error_check_good allpassed $allpassed TRUE
- puts "\t\tTest$tnum.$letter: succeeded (as expected)"
- }
-
- error_check_good dbc_close [$dbc close] 0
- if { $usecursor != 0 } {
- error_check_good getcurs_close [$getcurs close] 0
- }
- }
-}
-
-# Test of invalid flag combinations
-proc t95_flagtest_body { db tnum letter bufsize usecursor } {
- global errorCode
-
- foreach flag { multi multi_key } {
- if { $usecursor == 0 } {
- if { $flag == "multi_key" } {
- # db->get does not allow multi_key
- continue
- } else {
- set action "db get -$flag"
- }
- } else {
- set action "dbc get -$flag"
- }
- puts "\tTest$tnum.$letter: $action with invalid flag combinations"
-
- # Cursor for $usecursor.
- if { $usecursor != 0 } {
- set getcurs [$db cursor]
- error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
- }
-
- if { $usecursor == 0 } {
- # Disallowed flags for db->get
- set badflags [list consume consume_wait {rmw some_key}]
-
- foreach badflag $badflags {
- catch {eval $db get -$flag $bufsize -$badflag} ret
- error_check_good \
- db:get:$flag:$badflag [is_substr $errorCode EINVAL] 1
- }
- } else {
- # Disallowed flags for db->cget
- set cbadflags [list last get_recno join_item \
- {multi_key 1000} prev prevnodup]
-
- set dbc [$db cursor]
- $dbc get -first
- foreach badflag $cbadflags {
- catch {eval $dbc get -$flag $bufsize -$badflag} ret
- error_check_good dbc:get:$flag:$badflag \
- [is_substr $errorCode EINVAL] 1
- }
- error_check_good dbc_close [$dbc close] 0
- }
- if { $usecursor != 0 } {
- error_check_good getcurs_close [$getcurs close] 0
- }
- }
- puts "\t\tTest$tnum.$letter completed"
-}
-
-# Verify that a passed-in list of key/data pairs all match the predicted
-# structure (e.g. {{thing1 thing1.0}}, {{key2 key2.0} {key2 key2.1}}).
-proc t95_verify { res multiple_keys } {
- global alphabet
-
- set i 0
- set orig_key [lindex [lindex $res 0] 0]
- set nkeys [string trim $orig_key $alphabet']
- set base_key [string trim $orig_key 0123456789]
- set datum_count 0
-
- while { 1 } {
- set key [lindex [lindex $res $i] 0]
- set datum [lindex [lindex $res $i] 1]
- if { $datum_count >= $nkeys } {
- if { [llength $key] != 0 } {
- # If there are keys beyond $nkeys, we'd
- # better have multiple_keys set.
- error_check_bad "keys beyond number $i allowed"\
- $multiple_keys FALSE
-
- # If multiple_keys is set, accept the new key.
- set orig_key $key
- set nkeys [eval string trim \
- $orig_key {$alphabet'}]
- set base_key [eval string trim \
- $orig_key 0123456789]
- set datum_count 0
- } else {
- # datum_count has hit nkeys. We're done.
- return
- }
- }
-
- error_check_good returned_key($i) $key $orig_key
- error_check_good returned_datum($i) \
- $datum $base_key.[format %4u $datum_count]
- incr datum_count
- incr i
- }
-}
-
-# Add nsets dup sets; the i'th set holds i pairs {word$i word.j},
-# with "word" getting (i * pad_bytes) bytes of extra padding.
-proc t95_populate { db did nsets pad_bytes } {
- set txn ""
- for { set i 1 } { $i <= $nsets } { incr i } {
- # basekey is a padded dictionary word
- gets $did basekey
-
- append basekey [repeat "a" [expr $pad_bytes * $i]]
-
- # key is basekey with the number of dups stuck on.
- set key $basekey$i
-
- for { set j 0 } { $j < $i } { incr j } {
- set data $basekey.[format %4u $j]
- error_check_good db_put($key,$data) \
- [eval {$db put} $txn {$key $data}] 0
- }
- }
-
- # This will make debugging easier, and since the database is
- # read-only from here out, it's cheap.
- error_check_good db_sync [$db sync] 0
-}
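-
-# Sketch of the bulk-get calls the tests above drive (illustrative;
-# buffer sizes must be multiples of 1024 and at least one page):
-proc t95_bulk_sketch { db key } {
-	# Fetch all duplicates for $key in one call; the result is a
-	# list of {key datum} pairs filled from a single 65K buffer.
-	set bigbuf [expr 64 * 1024 + 1024]
-	return [$db get -multi $bigbuf $key]
-}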
diff --git a/storage/bdb/test/test096.tcl b/storage/bdb/test/test096.tcl
deleted file mode 100644
index ac8450069cc..00000000000
--- a/storage/bdb/test/test096.tcl
+++ /dev/null
@@ -1,371 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test096.tcl,v 11.26 2004/06/10 17:21:20 carol Exp $
-#
-# TEST test096
-# TEST Db->truncate test.
-# TEST For all methods:
-# TEST Test that truncate empties an existing database.
-# TEST Test that truncate-write in an aborted txn doesn't
-# TEST change the original contents.
-# TEST Test that truncate-write in a committed txn does
-# TEST overwrite the original contents.
-# TEST For btree and hash, do the same in a database with offpage dups.
-proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} {
- global fixed_len
- global alphabet
- source ./include.tcl
-
- set orig_fixed_len $fixed_len
- set args [convert_args $method $args]
- set encargs ""
- set args [split_encargs $args encargs]
- set omethod [convert_method $method]
-
- puts "Test096: $method db truncate method test"
- set pgindex [lsearch -exact $args "-pagesize"]
- if { $pgindex != -1 } {
- puts "Test096: Skipping for specific pagesizes"
- return
- }
-
- # Create the database and open the dictionary
- set eindex [lsearch -exact $args "-env"]
- set testfile test096.db
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 0 } {
- puts "Environment w/o txns specified; skipping."
- return
- }
- if { $nentries == 1000 } {
- set nentries 100
- }
- reduce_dups nentries ndups
- set testdir [get_home $env]
- set closeenv 0
- } else {
- env_cleanup $testdir
-
- # We need an env for exclusive-use testing. Since we are
- # using txns, we need at least 1 lock per record for queue.
- set lockmax [expr $nentries * 2]
- set env [eval {berkdb_env -create -home $testdir \
- -lock_max $lockmax -txn} $encargs]
- error_check_good env_create [is_valid_env $env] TRUE
- set closeenv 1
- }
-
- set t1 $testdir/t1
-
- puts "\tTest096.a: Create database with $nentries entries"
- set db [eval {berkdb_open -create -auto_commit \
- -env $env $omethod -mode 0644} $args $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
- t96_populate $db $omethod $env $nentries
- error_check_good dbclose [$db close] 0
-
- puts "\tTest096.b: Truncate database"
- set dbtr [eval {berkdb_open -create -auto_commit \
- -env $env $omethod -mode 0644} $args $testfile]
- error_check_good db_open [is_valid_db $dbtr] TRUE
-
- set ret [$dbtr truncate -auto_commit]
- error_check_good dbtrunc $ret $nentries
- error_check_good db_close [$dbtr close] 0
-
- set db [eval {berkdb_open -env $env} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set number [number_of_entries $db $method]
- error_check_good number_of_entries $number 0
- error_check_good dbclose [$db close] 0
- error_check_good dbverify [verify_dir $testdir "\tTest096.c: "] 0
-
- # Remove and recreate database.
- puts "\tTest096.d: Recreate database with $nentries entries"
- set db [eval {berkdb_open -create -auto_commit \
- -env $env $omethod -mode 0644} $args $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
- t96_populate $db $omethod $env $nentries
- error_check_good dbclose [$db close] 0
-
- puts "\tTest096.e: Truncate and write in a txn, then abort"
- txn_truncate $env $omethod $testfile $nentries abort 1
-
- set db [eval {berkdb_open -env $env} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Database should have original contents since both the truncate
- # and the write were aborted
- set number [number_of_entries $db $method]
- error_check_good number_of_entries $number $nentries
- error_check_good dbclose [$db close] 0
-
- error_check_good dbverify [verify_dir $testdir "\tTest096.f: "] 0
-
- puts "\tTest096.g: Truncate and write in a txn, then commit"
- txn_truncate $env $omethod $testfile $nentries commit 1
-
- set db [eval {berkdb_open -env $env} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Database should contain only the new items
- set number [number_of_entries $db $method]
- error_check_good number_of_entries $number [expr $nentries / 2]
- error_check_good dbclose [$db close] 0
- error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0
-
- puts "\tTest096.i: Check proper handling of overflow pages."
- # Large keys and data compared to page size guarantee
- # overflow pages.
- if { [is_fixed_length $method] == 1 } {
- puts "Skipping overflow test for fixed-length method."
- } else {
- set overflowfile overflow096.db
- set data [repeat $alphabet 600]
- set db [eval {berkdb_open -create -auto_commit -pagesize 512 \
- -env $env $omethod -mode 0644} $args $overflowfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set noverflows 100
- for { set i 1 } { $i <= $noverflows } { incr i } {
- set ret [eval {$db put} -auto_commit \
- $i [chop_data $method "$i$data"]]
- }
-
- set stat [$db stat]
- error_check_bad stat:overflow [is_substr $stat \
- "{{Overflow pages} 0}"] 1
-
- error_check_good overflow_truncate [$db truncate] $noverflows
- error_check_good overflow_close [$db close] 0
- }
-
- # Remove database and create a new one with dups. Skip
- # the rest of the test for methods not supporting dups.
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Skipping remainder of test096 for method $method"
- if { $closeenv == 1 } {
- error_check_good envclose [$env close] 0
- }
- return
- }
- set ret [berkdb dbremove -env $env -auto_commit $testfile]
- set ret [berkdb dbremove -env $env -auto_commit $overflowfile]
-
- puts "\tTest096.j: Create $nentries entries with $ndups duplicates"
- set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
- -create -env $env $omethod -mode 0644} $args $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- t96_populate $db $omethod $env $nentries $ndups
-
- set dlist ""
- for { set i 1 } {$i <= $ndups} {incr i} {
- lappend dlist $i
- }
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- dup_check $db $txn $t1 $dlist
- error_check_good txn [$t commit] 0
- puts "\tTest096.k: Verify off page duplicates status"
- set stat [$db stat]
- error_check_bad stat:offpage [is_substr $stat \
- "{{Duplicate pages} 0}"] 1
-
- set recs [expr $ndups * $nentries]
- error_check_good dbclose [$db close] 0
-
- puts "\tTest096.l: Truncate database in a txn then abort"
- txn_truncate $env $omethod $testfile $recs abort
-
- set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set number [number_of_entries $db $method]
- error_check_good number_of_entries $number $recs
- error_check_good dbclose [$db close] 0
-
- puts "\tTest096.m: Truncate database in a txn then commit"
- txn_truncate $env $omethod $testfile $recs commit
-
- set db [berkdb_open -auto_commit -env $env $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set number [number_of_entries $db $method]
- error_check_good number_of_entries $number 0
- error_check_good dbclose [$db close] 0
-
- set testdir [get_home $env]
- error_check_good dbverify [verify_dir $testdir "\tTest096.n: "] 0
-
- # Remove database, and create a new one with dups. Test
- # truncate + write within a transaction.
- puts "\tTest096.o: Create $nentries entries with $ndups duplicates"
- set ret [berkdb dbremove -env $env -auto_commit $testfile]
- set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
- -create -env $env $omethod -mode 0644} $args $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- t96_populate $db $omethod $env $nentries $ndups
-
- set dlist ""
- for { set i 1 } {$i <= $ndups} {incr i} {
- lappend dlist $i
- }
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- dup_check $db $txn $t1 $dlist
- error_check_good txn [$t commit] 0
- puts "\tTest096.p: Verify off page duplicates status"
- set stat [$db stat]
- error_check_bad stat:offpage [is_substr $stat \
- "{{Duplicate pages} 0}"] 1
-
- set recs [expr $ndups * $nentries]
- error_check_good dbclose [$db close] 0
-
- puts "\tTest096.q: Truncate and write in a txn, then abort"
- txn_truncate $env $omethod $testfile $recs abort 1
-
- set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set number [number_of_entries $db $method]
- error_check_good number_of_entries $number $recs
- error_check_good dbclose [$db close] 0
-
- puts "\tTest096.r: Truncate and write in a txn, then commit"
- txn_truncate $env $omethod $testfile $recs commit 1
-
- set db [berkdb_open -auto_commit -env $env $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- set number [number_of_entries $db $method]
- error_check_good number_of_entries $number [expr $recs / 2]
- error_check_good dbclose [$db close] 0
-
- puts "\tTest096.s: Check overflow pages with dups."
- set ndups 3
- set db [eval {berkdb_open -create -auto_commit -pagesize 512 \
- -env $env $omethod -dup -mode 0644} $args $overflowfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- for { set i 1 } { $i <= $noverflows } { incr i } {
- for { set j 0 } { $j < $ndups } { incr j } {
- set ret [eval {$db put} -auto_commit \
- $i [chop_data $method "$i.$j$data"]]
- }
- }
-
- set stat [$db stat]
- error_check_bad stat:overflow [is_substr $stat \
- "{{Overflow pages} 0}"] 1
-
- set nentries [expr $noverflows * $ndups]
- error_check_good overflow_truncate [$db truncate] $nentries
- error_check_good overflow_close [$db close] 0
-
- set testdir [get_home $env]
- error_check_good dbverify [verify_dir $testdir "\tTest096.t: "] 0
-
- if { $closeenv == 1 } {
- error_check_good envclose [$env close] 0
- }
-}
-
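-# Sketch: Db->truncate returns the number of records it discarded,
-# which is what the checks above compare against the expected entry
-# counts (illustrative):
-#	set discarded [$db truncate -txn $t]	;# 0 on an empty db
-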
-proc t96_populate {db method env nentries {ndups 1}} {
- global datastr
- global pad_datastr
- source ./include.tcl
-
- set did [open $dict]
- set count 0
- set txn ""
- set pflags ""
- set gflags ""
-
- if { [is_record_based $method] == 1 } {
- append gflags "-recno"
- }
- set pad_datastr [pad_data $method $datastr]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- if { $ndups > 1 } {
- for { set i 1 } { $i <= $ndups } { incr i } {
- set datastr $i:$str
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- set ret [eval {$db put} $txn $pflags \
- {$key [chop_data $method $datastr]}]
- error_check_good put $ret 0
- error_check_good txn [$t commit] 0
- }
- } else {
- set datastr [reverse $str]
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put $ret 0
- error_check_good txn [$t commit] 0
- }
- set ret [eval {$db get} $gflags {$key}]
- error_check_good $key:dbget [llength $ret] $ndups
- incr count
- }
- close $did
-}
-
-proc number_of_entries { db method } {
- if { [is_record_based $method] == 1 } {
- set dbc [$db cursor]
- set last [$dbc get -last]
- if {[llength $last] == 0} {
- set number 0
- } else {
- set number [lindex [lindex $last 0] 0]
- }
- } else {
- set ret [$db get -glob *]
- set number [llength $ret]
- }
- return $number
-}
-
-# Open database. Truncate in a transaction, optionally with a write
-# included in the transaction as well, then abort or commit. Close database.
-
-proc txn_truncate { env method testfile nentries op {write 0}} {
- set db [eval {berkdb_open -create -auto_commit \
- -env $env $method -mode 0644} $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set txn [$env txn]
- error_check_good txnbegin [is_valid_txn $txn $env] TRUE
-
- set ret [$db truncate -txn $txn]
- error_check_good dbtrunc $ret $nentries
- if { $write == 1 } {
- for {set i 1} {$i <= [expr $nentries / 2]} {incr i} {
- set ret [eval {$db put} -txn $txn \
- {$i [chop_data $method "aaaaaaaaaa"]}]
- error_check_good write $ret 0
- }
- }
-
- error_check_good txn$op [$txn $op] 0
- error_check_good db_close [$db close] 0
-}
-
diff --git a/storage/bdb/test/test097.tcl b/storage/bdb/test/test097.tcl
deleted file mode 100644
index 2a6234e00a5..00000000000
--- a/storage/bdb/test/test097.tcl
+++ /dev/null
@@ -1,188 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test097.tcl,v 11.11 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test097
-# TEST Open up a large set of database files simultaneously.
-# TEST Adjust for local file descriptor resource limits.
-# TEST	Then use the first nentries (default 400) entries from the
-# TEST	dictionary.
-# TEST Insert each with self as key and a fixed, medium length data string;
-# TEST retrieve each. After all are entered, retrieve all; compare output
-# TEST to original.
-
-proc test097 { method {ndbs 500} {nentries 400} args } {
- global pad_datastr
- source ./include.tcl
-
- set largs [convert_args $method $args]
- set encargs ""
- set largs [split_encargs $largs encargs]
-
- # Open an environment, with a 1MB cache.
- set eindex [lsearch -exact $largs "-env"]
- if { $eindex != -1 } {
- incr eindex
- set env [lindex $largs $eindex]
- puts "Test097: $method: skipping for env $env"
- return
- }
- env_cleanup $testdir
- set env [eval {berkdb_env -create \
- -cachesize { 0 1048576 1 } -txn} -home $testdir $encargs]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- # Create the database and open the dictionary
- set basename test097
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- #
- # When running with HAVE_MUTEX_SYSTEM_RESOURCES,
- # we can run out of mutex lock slots due to the nature of this test.
- # So, for this test, increase the number of pages per extent
- # to consume fewer resources.
- #
- if { [is_queueext $method] } {
- set numdb [expr $ndbs / 4]
- set eindex [lsearch -exact $largs "-extent"]
- error_check_bad extent $eindex -1
- incr eindex
- set extval [lindex $largs $eindex]
- set extval [expr $extval * 4]
- set largs [lreplace $largs $eindex $eindex $extval]
- }
- puts -nonewline "Test097: $method ($largs) "
- puts "$nentries entries in at most $ndbs simultaneous databases"
-
- puts "\tTest097.a: Simultaneous open"
- set numdb [test097_open tdb $ndbs $method $env $basename $largs]
- if { $numdb == 0 } {
- puts "\tTest097: Insufficient resources available -- skipping."
- error_check_good envclose [$env close] 0
- return
- }
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put and get each key/data pair
- if { [is_record_based $method] == 1 } {
- append gflags "-recno"
- }
- puts "\tTest097.b: put/get on $numdb databases"
- set datastr "abcdefghij"
- set pad_datastr [pad_data $method $datastr]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
- }
- for { set i 1 } { $i <= $numdb } { incr i } {
- set ret [eval {$tdb($i) put} $txn $pflags \
- {$key [chop_data $method $datastr]}]
- error_check_good put $ret 0
- set ret [eval {$tdb($i) get} $gflags {$key}]
- error_check_good get $ret [list [list $key \
- [pad_data $method $datastr]]]
- }
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest097.c: dump and check files"
- for { set j 1 } { $j <= $numdb } { incr j } {
- dump_file $tdb($j) $txn $t1 test097.check
- error_check_good db_close [$tdb($j) close] 0
-
- # Now compare the keys to see if they match the dictionary
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
-			for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid $i
- }
- close $oid
- filesort $t2 $t3
- file rename -force $t3 $t2
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- }
- filesort $t1 $t3
-
- error_check_good Test097:diff($t3,$t2) [filecmp $t3 $t2] 0
- }
- error_check_good envclose [$env close] 0
-}
-
-# Check function for test097; each data value should equal the fixed datastr.
-proc test097.check { key data } {
- global pad_datastr
- error_check_good "data mismatch for key $key" $data $pad_datastr
-}
-
-proc test097_open { tdb ndbs method env basename largs } {
- global errorCode
- upvar $tdb db
-
- set j 0
- set numdb $ndbs
- if { [is_queueext $method] } {
- set numdb [expr $ndbs / 4]
- }
- set omethod [convert_method $method]
- for { set i 1 } {$i <= $numdb } { incr i } {
- set stat [catch {eval {berkdb_open -env $env \
- -pagesize 512 -create -mode 0644} \
- $largs {$omethod $basename.$i.db}} db($i)]
- #
- # Check if we've reached our limit
- #
- if { $stat == 1 } {
- set min 20
- set em [is_substr $errorCode EMFILE]
- set en [is_substr $errorCode ENFILE]
- error_check_good open_ret [expr $em || $en] 1
- puts \
- "\tTest097.a.1 Encountered resource limits opening $i files, adjusting"
- if { [is_queueext $method] } {
- set end [expr $j / 4]
- set min 10
- } else {
- set end [expr $j - 10]
- }
- #
- # If we cannot open even $min files, then this test is
- # not very useful. Close up shop and go back.
- #
- if { $end < $min } {
- test097_close db 1 $j
- return 0
- }
- test097_close db [expr $end + 1] $j
- return $end
- } else {
- error_check_good dbopen [is_valid_db $db($i)] TRUE
- set j $i
- }
- }
- return $j
-}
-
-proc test097_close { tdb start end } {
- upvar $tdb db
-
- for { set i $start } { $i <= $end } { incr i } {
- error_check_good db($i)close [$db($i) close] 0
- }
-}
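-
-# Sketch of the resource-limit pattern used above: trap EMFILE/ENFILE
-# from the open call and back off instead of failing (illustrative):
-proc test097_try_open { path } {
-	global errorCode
-	if { [catch { berkdb_open -create -btree $path } db] } {
-		if { [is_substr $errorCode EMFILE] || \
-		    [is_substr $errorCode ENFILE] } {
-			return ""
-		}
-		error $db
-	}
-	return $db
-}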
diff --git a/storage/bdb/test/test098.tcl b/storage/bdb/test/test098.tcl
deleted file mode 100644
index af6b6a6c607..00000000000
--- a/storage/bdb/test/test098.tcl
+++ /dev/null
@@ -1,91 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2002-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test098.tcl,v 1.7 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test098
-# TEST Test of DB_GET_RECNO and secondary indices. Open a primary and
-# TEST a secondary, and do a normal cursor get followed by a get_recno.
-# TEST (This is a smoke test for "Bug #1" in [#5811].)
-
-proc test098 { method args } {
- source ./include.tcl
-
- set omethod [convert_method $method]
- set args [convert_args $method $args]
-
- puts "Test098: $omethod ($args): DB_GET_RECNO and secondary indices."
-
- if { [is_rbtree $method] != 1 } {
- puts "\tTest098: Skipping for method $method."
- return
- }
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- set txn ""
- set auto ""
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set base $testdir/test098
- set env NULL
- } else {
- set base test098
- incr eindex
- set env [lindex $args $eindex]
- set rpcenv [is_rpcenv $env]
- if { $rpcenv == 1 } {
- puts "Test098: Skipping for RPC"
- return
- }
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- set auto " -auto_commit "
- }
- set testdir [get_home $env]
- }
- cleanup $testdir $env
-
- puts "\tTest098.a: Set up databases."
-
- set adb [eval {berkdb_open} $omethod $args $auto \
- {-create} $base-primary.db]
- error_check_good adb_create [is_valid_db $adb] TRUE
-
- set bdb [eval {berkdb_open} $omethod $args $auto \
- {-create} $base-secondary.db]
- error_check_good bdb_create [is_valid_db $bdb] TRUE
-
- set ret [eval $adb associate $auto [callback_n 0] $bdb]
- error_check_good associate $ret 0
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set ret [eval {$adb put} $txn aaa data1]
- error_check_good put $ret 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
-
- set bc [$bdb cursor]
- error_check_good cursor [is_valid_cursor $bc $bdb] TRUE
-
- puts "\tTest098.b: c_get(DB_FIRST) on the secondary."
- error_check_good get_first [$bc get -first] \
- [list [list [[callback_n 0] aaa data1] data1]]
-
- puts "\tTest098.c: c_get(DB_GET_RECNO) on the secondary."
- error_check_good get_recno [$bc get -get_recno] 1
-
- error_check_good c_close [$bc close] 0
-
- error_check_good bdb_close [$bdb close] 0
- error_check_good adb_close [$adb close] 0
-}
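-
-# Sketch of the primary/secondary wiring smoke-tested above. The
-# associate callback receives a primary key/data pair and returns
-# the secondary key (illustrative; the harness's [callback_n 0]
-# derives its own secondary key):
-proc test098_skey_sketch { pkey pdata } {
-	# Index the primary records by their data values.
-	return $pdata
-}
-#	$adb associate test098_skey_sketch $bdb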
diff --git a/storage/bdb/test/test099.tcl b/storage/bdb/test/test099.tcl
deleted file mode 100644
index 9bdc0d7af42..00000000000
--- a/storage/bdb/test/test099.tcl
+++ /dev/null
@@ -1,276 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test099.tcl,v 1.7 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test099
-# TEST
-# TEST Test of DB->get and DBC->c_get with set_recno and get_recno.
-# TEST
-# TEST Populate a small btree -recnum database.
-# TEST After all are entered, retrieve each using -recno with DB->get.
-# TEST Open a cursor and do the same for DBC->c_get with set_recno.
-# TEST Verify that set_recno sets the record number position properly.
-# TEST Verify that get_recno returns the correct record numbers.
-# TEST
-# TEST Using the same database, open 3 cursors and position one at
-# TEST the beginning, one in the middle, and one at the end. Delete
-# TEST by cursor and check that record renumbering is done properly.
-#
-proc test099 { method {nentries 10000} args } {
- source ./include.tcl
-
- set args [convert_args $method $args]
- set omethod [convert_method $method]
-
- puts "Test099: Test of set_recno and get_recno in DBC->c_get."
- if { [is_rbtree $method] != 1 } {
- puts "Test099: skipping for method $method."
- return
- }
-
- set txnenv 0
- set eindex [lsearch -exact $args "-env"]
- #
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
- if { $eindex == -1 } {
- set testfile $testdir/test099.db
- set env NULL
- } else {
- set testfile test099.db
- incr eindex
- set env [lindex $args $eindex]
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- #
- # If we are using txns and running with the
- # default, set the default down a bit.
- #
- if { $nentries == 10000 } {
- set nentries 100
- }
- }
- set testdir [get_home $env]
- }
- set t1 $testdir/t1
- cleanup $testdir $env
-
- # Create the database and open the dictionary
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 1
-
- append gflags " -recno"
-
- puts "\tTest099.a: put loop"
- # Here is the loop where we put each key/data pair
- while { [gets $did str] != -1 && $count <= $nentries } {
- set key $str
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set r [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good db_put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- incr count
- }
- close $did
-
- puts "\tTest099.b: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 test099.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- puts "\tTest099.c: Test set_recno then get_recno"
- set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Open a cursor
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
-
- set did [open $t1]
- set recno 1
-
- # Create key(recno) array to use for later comparison
- while { [gets $did str] != -1 } {
- set kvals($recno) $str
- incr recno
- }
-
- set recno 1
- set ret [$dbc get -first]
- error_check_bad dbc_get_first [llength $ret] 0
-
- # First walk forward through the database ....
- while { $recno < $count } {
- # Test set_recno: verify it sets the record number properly.
- set current [$dbc get -current]
- set r [$dbc get -set_recno $recno]
- error_check_good set_recno $current $r
- # Test set_recno: verify that we find the expected key
- # at the current record number position.
- set k [lindex [lindex $r 0] 0]
- error_check_good set_recno $kvals($recno) $k
-
- # Test get_recno: verify that the return from
- # get_recno matches the record number just set.
- set g [$dbc get -get_recno]
- error_check_good get_recno $recno $g
- set ret [$dbc get -next]
- incr recno
- }
-
- # ... and then backward.
- set recno [expr $count - 1]
- while { $recno > 0 } {
- # Test set_recno: verify that we find the expected key
- # at the current record number position.
- set r [$dbc get -set_recno $recno]
- set k [lindex [lindex $r 0] 0]
- error_check_good set_recno $kvals($recno) $k
-
- # Test get_recno: verify that the return from
- # get_recno matches the record number just set.
- set g [$dbc get -get_recno]
- error_check_good get_recno $recno $g
- set recno [expr $recno - 1]
- }
-
- error_check_good cursor_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
- close $did
-
- puts "\tTest099.d: Test record renumbering with cursor deletes."
- # Reopen the database, this time with write permission.
- set db [eval {berkdb_open} $args $omethod $testfile ]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Open three cursors.
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc0 [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc0 $db] TRUE
- set dbc1 [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc1 $db] TRUE
- set dbc2 [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
-
- # Initialize cursor positions. Set dbc0 at the beginning,
- # dbc1 at the middle, and dbc2 at the end.
- set ret [$dbc0 get -first]
- error_check_bad dbc0_get_first [llength $ret] 0
-
- set middle [expr $nentries / 2 + 1]
- set ret [$dbc1 get -set_recno $middle]
- error_check_bad dbc1_get_middle [llength $ret] 0
-
- set ret [$dbc2 get -last]
- error_check_bad dbc2_get_last [llength $ret] 0
-
- # At each iteration, delete the first entry, delete the middle
- # entry, and check the record number for beginning, middle and end.
- set count 1
- while { $count <= [expr $nentries / 2] } {
- # Delete first item.
- error_check_good dbc0_del [$dbc0 del] 0
-
-		# For non-txn envs, check that db_stat recalculates
-		# to adjust for items marked for deletion. We can't do this
-		# in txn envs because the live txn would cause a deadlock.
- if { $txnenv == 0 } {
- set nkeys [expr $nentries - [expr $count * 2] + 1]
- set stat [$db stat]
- error_check_good keys_after_delete [is_substr $stat \
- "{Number of keys} $nkeys"] 1
- error_check_good records_after_delete [is_substr $stat \
- "{Number of records} $nkeys"] 1
-
- # Now delete the same entry again (which should not
- # change the database) and make sure db->stat returns
- # the same number of keys and records as before.
-			catch {$dbc del} result
-
- set stat [$db stat]
- error_check_good keys_after_baddelete [is_substr $stat \
- "{Number of keys} $nkeys"] 1
- error_check_good recs_after_baddelete [is_substr $stat \
- "{Number of records} $nkeys"] 1
- }
-
- # Reposition cursor to new first item, check that record number
- # is 1.
- set ret0 [$dbc0 get -next]
- error_check_good beginning_recno [$dbc0 get -get_recno] 1
-
- # Calculate the current middle recno and compare to actual.
- set middle [$dbc1 get -get_recno]
- set calcmiddle [expr [expr $nentries / 2] - $count + 1]
- error_check_good middle_recno $middle $calcmiddle
-
- # Delete middle item, reposition cursor to next item.
- error_check_good dbc1_del [$dbc1 del] 0
- set ret1 [$dbc1 get -next]
-
- # Calculate the expected end recno and compare to actual.
- set end [$dbc2 get -get_recno]
- set calcend [expr $nentries - [expr $count * 2]]
- # On the last iteration, all items have been deleted so
- # there is no recno.
- if { $calcend == 0 } {
- error_check_good end_recno $end ""
- } else {
- error_check_good end_recno $end $calcend
- }
- incr count
- }
-
- # Close all three cursors.
- error_check_good cursor_close [$dbc0 close] 0
- error_check_good cursor_close [$dbc1 close] 0
- error_check_good cursor_close [$dbc2 close] 0
-
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
-
-# Check function for dumped file; key and data should be identical.
-proc test099.check { key data } {
- error_check_good "data mismatch for key $key" $key $data
-}
diff --git a/storage/bdb/test/test100.tcl b/storage/bdb/test/test100.tcl
deleted file mode 100644
index 9d87331dc08..00000000000
--- a/storage/bdb/test/test100.tcl
+++ /dev/null
@@ -1,17 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test100.tcl,v 11.5 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test100
-# TEST Test for functionality near the end of the queue
-# TEST using test025 (DB_APPEND).
-proc test100 { method {nentries 10000} {tnum "100"} args} {
- if { [is_queueext $method ] == 0 } {
- puts "Skipping test$tnum for $method."
- return;
- }
- eval {test025 $method $nentries 4294967000 $tnum} $args
-}
diff --git a/storage/bdb/test/test101.tcl b/storage/bdb/test/test101.tcl
deleted file mode 100644
index 63384dd1776..00000000000
--- a/storage/bdb/test/test101.tcl
+++ /dev/null
@@ -1,17 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: test101.tcl,v 11.6 2004/01/28 03:36:32 bostic Exp $
-#
-# TEST test101
-# TEST Test for functionality near the end of the queue
-# TEST using test070 (DB_CONSUME).
-proc test101 { method {nentries 1000} {txn -txn} {tnum "101"} args} {
- if { [is_queueext $method ] == 0 } {
- puts "Skipping test$tnum for $method."
- return;
- }
- eval {test070 $method 4 2 $nentries WAIT 4294967000 $txn $tnum} $args
-}
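Both wrappers start the queue at 4294967000 because queue record numbers are unsigned 32-bit values; that origin leaves fewer than 300 records before the counter wraps, so the wrap case is reached quickly. As a quick check of the arithmetic:

    # 2^32 - 1 is the largest queue record number.
    puts [expr {4294967295 - 4294967000}]    ;# 295 records to the wrap point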
diff --git a/storage/bdb/test/testparams.tcl b/storage/bdb/test/testparams.tcl
deleted file mode 100644
index 16ef7c9b0b0..00000000000
--- a/storage/bdb/test/testparams.tcl
+++ /dev/null
@@ -1,336 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: testparams.tcl,v 11.200 2004/10/12 16:22:14 sue Exp $
-
-global one_test
-global serial_tests
-set serial_tests {rep002 rep005}
-
-set subs {bigfile dead env fop lock log memp mutex recd rep rpc rsrc\
- sdb sdbtest sec si test txn}
-
-set test_names(bigfile) [list bigfile001 bigfile002]
-set test_names(dead) [list dead001 dead002 dead003 dead004 dead005 dead006 \
- dead007]
-set test_names(elect) [list rep002 rep005 rep016 rep020 rep022]
-set test_names(env) [list env001 env002 env003 env004 env005 env006 \
- env007 env008 env009 env010 env011]
-set test_names(fop) [list fop001 fop002 fop003 fop004 fop005 fop006]
-set test_names(lock) [list lock001 lock002 lock003 lock004 lock005 lock006]
-set test_names(log) [list log001 log002 log003 log004 log005 log006]
-set test_names(memp) [list memp001 memp002 memp003 memp004]
-set test_names(mutex) [list mutex001 mutex002 mutex003]
-set test_names(recd) [list recd001 recd002 recd003 recd004 recd005 recd006 \
- recd007 recd008 recd009 recd010 recd011 recd012 recd013 recd014 recd015 \
- recd016 recd017 recd018 recd019 recd020 ]
-set test_names(rep) [list rep001 rep002 rep003 rep005 rep006 rep007 \
- rep008 rep009 rep010 rep011 rep012 rep013 rep014 rep015 rep016 rep017 \
- rep018 rep019 rep020 rep021 rep022 rep023 rep024 rep026 rep027 rep028 \
- rep029 rep030 rep031 rep032 rep033 rep034 rep035 rep036 rep037]
-set test_names(rpc) [list rpc001 rpc002 rpc003 rpc004 rpc005 rpc006]
-set test_names(rsrc) [list rsrc001 rsrc002 rsrc003 rsrc004]
-set test_names(sdb) [list sdb001 sdb002 sdb003 sdb004 sdb005 sdb006 \
- sdb007 sdb008 sdb009 sdb010 sdb011 sdb012]
-set test_names(sdbtest) [list sdbtest001 sdbtest002]
-set test_names(sec) [list sec001 sec002]
-set test_names(si) [list si001 si002 si003 si004 si005]
-set test_names(test) [list test001 test002 test003 test004 test005 \
- test006 test007 test008 test009 test010 test011 test012 test013 test014 \
- test015 test016 test017 test018 test019 test020 test021 test022 test023 \
- test024 test025 test026 test027 test028 test029 test030 test031 test032 \
- test033 test034 test035 test036 test037 test038 test039 test040 test041 \
- test042 test043 test044 test045 test046 test047 test048 test049 test050 \
- test051 test052 test053 test054 test055 test056 test057 test058 test059 \
- test060 test061 test062 test063 test064 test065 test066 test067 test068 \
- test069 test070 test071 test072 test073 test074 test076 test077 \
- test078 test079 test081 test082 test083 test084 test085 test086 \
- test087 test088 test089 test090 test091 test092 test093 test094 test095 \
- test096 test097 test098 test099 test100 test101 test102 test103 test107 \
- test109 ]
-set test_names(txn) [list txn001 txn002 txn003 txn004 txn005 txn006 \
- txn007 txn008 txn009 txn010 txn011]
-
-set rpc_tests(berkeley_db_svc) [concat $test_names(test) $test_names(sdb)]
-set rpc_tests(berkeley_db_cxxsvc) $test_names(test)
-set rpc_tests(berkeley_db_javasvc) $test_names(test)
-
-# JE tests are a subset of regular RPC tests -- exclude the others.
-# The first group below should be fixable by modifying tests that deal
-# with unsorted duplicates; the second group will probably never work
-# unless certain features are added to JE (record numbers, bulk get,
-# etc.).
-set je_exclude {(?x) # Turn on extended syntax
- test(010|026|027|028|030|031|032|033|034| # These should be fixable by
- 035|039|041|046|047|054|056|057|062| # modifying tests to avoid
- 066|073|081|085)| # unsorted dups, etc.
-
- test(011|017|018|022|023|024|029|040|049| # Not expected to work with
- 062|083|095) # JE until / unless features
- # are added to JE (record
- # numbers, bulk gets, etc.)
-}
-set rpc_tests(berkeley_dbje_svc) [lsearch -all -inline -not -regexp \
- $rpc_tests(berkeley_db_svc) $je_exclude]
-
-# Source all the tests, whether we're running one or many.
-foreach sub $subs {
- foreach test $test_names($sub) {
- source $test_path/$test.tcl
- }
-}
-
-# Reset test_names if we're running only one test.
-if { $one_test != "ALL" } {
- foreach sub $subs {
- set test_names($sub) ""
- }
- set type [string trim $one_test 0123456789]
- set test_names($type) [list $one_test]
-}
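The trim above maps a single test name back to its group by stripping trailing digits. For example:

    set one_test test099
    set type [string trim $one_test 0123456789]
    puts $type    ;# -> test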
-
-source $test_path/archive.tcl
-source $test_path/byteorder.tcl
-source $test_path/dbm.tcl
-source $test_path/foputils.tcl
-source $test_path/hsearch.tcl
-source $test_path/join.tcl
-source $test_path/logtrack.tcl
-source $test_path/ndbm.tcl
-source $test_path/parallel.tcl
-source $test_path/reputils.tcl
-source $test_path/sdbutils.tcl
-source $test_path/shelltest.tcl
-source $test_path/sijointest.tcl
-source $test_path/siutils.tcl
-source $test_path/testutils.tcl
-source $test_path/upgrade.tcl
-
-set parms(recd001) 0
-set parms(recd002) 0
-set parms(recd003) 0
-set parms(recd004) 0
-set parms(recd005) ""
-set parms(recd006) 0
-set parms(recd007) ""
-set parms(recd008) {4 4}
-set parms(recd009) 0
-set parms(recd010) 0
-set parms(recd011) {200 15 1}
-set parms(recd012) {0 49 25 100 5}
-set parms(recd013) 100
-set parms(recd014) ""
-set parms(recd015) ""
-set parms(recd016) ""
-set parms(recd017) 0
-set parms(recd018) 10
-set parms(recd019) 50
-set parms(recd020) ""
-set parms(rep001) {1000 "001"}
-set parms(rep002) {10 3 "002"}
-set parms(rep003) "003"
-set parms(rep005) ""
-set parms(rep006) {1000 "006"}
-set parms(rep007) {10 "007"}
-set parms(rep008) {10 "008"}
-set parms(rep009) {10 "009"}
-set parms(rep010) {100 "010"}
-set parms(rep011) "011"
-set parms(rep012) {10 "012"}
-set parms(rep013) {10 "013"}
-set parms(rep014) {10 "014"}
-set parms(rep015) {100 "015" 3}
-set parms(rep016) ""
-set parms(rep017) {10 "017"}
-set parms(rep018) {10 "018"}
-set parms(rep019) {3 "019"}
-set parms(rep020) ""
-set parms(rep021) {3 "021"}
-set parms(rep022) ""
-set parms(rep023) {10 "023"}
-set parms(rep024) {1000 "024"}
-set parms(rep026) ""
-set parms(rep027) {1000 "027"}
-set parms(rep028) {100 "028"}
-set parms(rep029) {200 "029"}
-set parms(rep030) {500 "030"}
-set parms(rep031) {200 "031"}
-set parms(rep032) {200 "032"}
-set parms(rep033) {200 "033"}
-set parms(rep034) {2 "034"}
-set parms(rep035) {100 "035"}
-set parms(rep036) {200 "036"}
-set parms(subdb001) ""
-set parms(subdb002) 10000
-set parms(subdb003) 1000
-set parms(subdb004) ""
-set parms(subdb005) 100
-set parms(subdb006) 100
-set parms(subdb007) ""
-set parms(subdb008) ""
-set parms(subdb009) ""
-set parms(subdb010) ""
-set parms(subdb011) {13 10}
-set parms(subdb012) ""
-set parms(sdb001) ""
-set parms(sdb002) 10000
-set parms(sdb003) 1000
-set parms(sdb004) ""
-set parms(sdb005) 100
-set parms(sdb006) 100
-set parms(sdb007) ""
-set parms(sdb008) ""
-set parms(sdb009) ""
-set parms(sdb010) ""
-set parms(sdb011) {13 10}
-set parms(sdb012) ""
-set parms(si001) {200 1}
-set parms(si002) {200 2}
-set parms(si003) {200 3}
-set parms(si004) {200 4}
-set parms(si005) {200 5}
-set parms(test001) {10000 0 0 "001"}
-set parms(test002) 10000
-set parms(test003) ""
-set parms(test004) {10000 "004" 0}
-set parms(test005) 10000
-set parms(test006) {10000 0 "006" 5}
-set parms(test007) {10000 "007" 5}
-set parms(test008) {"008" 0}
-set parms(test009) ""
-set parms(test010) {10000 5 "010"}
-set parms(test011) {10000 5 "011"}
-set parms(test012) ""
-set parms(test013) 10000
-set parms(test014) 10000
-set parms(test015) {7500 0}
-set parms(test016) 10000
-set parms(test017) {0 19 "017"}
-set parms(test018) 10000
-set parms(test019) 10000
-set parms(test020) 10000
-set parms(test021) 10000
-set parms(test022) ""
-set parms(test023) ""
-set parms(test024) 10000
-set parms(test025) {10000 0 "025"}
-set parms(test026) {2000 5 "026"}
-set parms(test027) {100}
-set parms(test028) ""
-set parms(test029) 10000
-set parms(test030) 10000
-set parms(test031) {10000 5 "031"}
-set parms(test032) {10000 5 "032"}
-set parms(test033) {10000 5 "033"}
-set parms(test034) 10000
-set parms(test035) 10000
-set parms(test036) 10000
-set parms(test037) 100
-set parms(test038) {10000 5 "038"}
-set parms(test039) {10000 5 "039"}
-set parms(test040) 10000
-set parms(test041) 10000
-set parms(test042) 1000
-set parms(test043) 10000
-set parms(test044) {5 10 0}
-set parms(test045) 1000
-set parms(test046) ""
-set parms(test047) ""
-set parms(test048) ""
-set parms(test049) ""
-set parms(test050) ""
-set parms(test051) ""
-set parms(test052) ""
-set parms(test053) ""
-set parms(test054) ""
-set parms(test055) ""
-set parms(test056) ""
-set parms(test057) ""
-set parms(test058) ""
-set parms(test059) ""
-set parms(test060) ""
-set parms(test061) ""
-set parms(test062) {200 200 "062"}
-set parms(test063) ""
-set parms(test064) ""
-set parms(test065) ""
-set parms(test066) ""
-set parms(test067) {1000 "067"}
-set parms(test068) ""
-set parms(test069) {50 "069"}
-set parms(test070) {4 2 1000 CONSUME 0 -txn "070"}
-set parms(test071) {1 1 10000 CONSUME 0 -txn "071"}
-set parms(test072) {512 20 "072"}
-set parms(test073) {512 50 "073"}
-set parms(test074) {-nextnodup 100 "074"}
-set parms(test076) {1000 "076"}
-set parms(test077) {1000 "077"}
-set parms(test078) {100 512 "078"}
-set parms(test079) {10000 512 "079" 20}
-set parms(test081) {13 "081"}
-set parms(test082) {-prevnodup 100 "082"}
-set parms(test083) {512 5000 2}
-set parms(test084) {10000 "084" 65536}
-set parms(test085) {512 3 10 "085"}
-set parms(test086) ""
-set parms(test087) {512 50 "087"}
-set parms(test088) ""
-set parms(test089) 1000
-set parms(test090) {10000 "090"}
-set parms(test091) {4 2 1000 0 "091"}
-set parms(test092) {1000}
-set parms(test093) {10000 "093"}
-set parms(test094) {10000 10 "094"}
-set parms(test095) {"095"}
-set parms(test096) {512 1000 19}
-set parms(test097) {500 400}
-set parms(test098) ""
-set parms(test099) 10000
-set parms(test100) {10000 "100"}
-set parms(test101) {1000 -txn "101"}
-set parms(test102) {1000 "102"}
-set parms(test103) {100 4294967250 "103"}
-set parms(test107) ""
-set parms(test109) {"109"}
-
-# RPC server executables. Each of these is tested (if it exists)
-# when running the RPC tests.
-set svc_list { berkeley_db_svc berkeley_db_cxxsvc \
- berkeley_db_javasvc berkeley_dbje_svc }
-set rpc_svc berkeley_db_svc
-
-# Shell script tests. Each list entry is a {directory filename} pair,
-# invoked with "/bin/sh filename".
-set shelltest_list {
- { scr001 chk.code }
- { scr002 chk.def }
- { scr003 chk.define }
- { scr004 chk.javafiles }
- { scr005 chk.nl }
- { scr006 chk.offt }
- { scr007 chk.proto }
- { scr008 chk.pubdef }
- { scr009 chk.srcfiles }
- { scr010 chk.str }
- { scr011 chk.tags }
- { scr012 chk.vx_code }
- { scr013 chk.stats }
- { scr014 chk.err }
- { scr015 chk.cxxtests }
- { scr016 chk.javatests }
- { scr017 chk.db185 }
- { scr018 chk.comma }
- { scr019 chk.include }
- { scr020 chk.inc }
- { scr021 chk.flags }
- { scr022 chk.rr }
- { scr023 chk.q }
- { scr024 chk.bdb }
- { scr025 chk.cxxmulti }
- { scr026 chk.method }
- { scr027 chk.javas }
- { scr028 chk.rtc }
- { scr029 chk.get }
- { scr030 chk.build }
-}
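A hypothetical driver for the list above (not part of the original file) would iterate the {directory filename} pairs and hand each script to /bin/sh from its own directory:

    foreach pair $shelltest_list {
        foreach {sdir script} $pair break    ;# split the pair
        puts "running $sdir/$script"
        # exec /bin/sh -c "cd $sdir && /bin/sh $script"    ;# sketch only
    }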
diff --git a/storage/bdb/test/testutils.tcl b/storage/bdb/test/testutils.tcl
deleted file mode 100644
index ded9bc5ce7d..00000000000
--- a/storage/bdb/test/testutils.tcl
+++ /dev/null
@@ -1,3458 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: testutils.tcl,v 11.198 2004/09/28 15:02:18 carol Exp $
-#
-# Test system utilities
-#
-# Timestamp -- print time along with elapsed time since last invocation
-# of timestamp.
-proc timestamp {{opt ""}} {
- global __timestamp_start
-
- set now [clock seconds]
-
- # -c accurate to the click, instead of the second.
- # -r seconds since the Epoch
- # -t current time in the format expected by db_recover -t.
- # -w wallclock time
- # else wallclock plus elapsed time.
- if {[string compare $opt "-r"] == 0} {
- return $now
- } elseif {[string compare $opt "-t"] == 0} {
- return [clock format $now -format "%y%m%d%H%M.%S"]
- } elseif {[string compare $opt "-w"] == 0} {
- return [clock format $now -format "%c"]
- } else {
- if {[string compare $opt "-c"] == 0} {
- set printclicks 1
- } else {
- set printclicks 0
- }
-
- if {[catch {set start $__timestamp_start}] != 0} {
- set __timestamp_start $now
- }
- set start $__timestamp_start
-
- set elapsed [expr $now - $start]
- set the_time [clock format $now -format ""]
- set __timestamp_start $now
-
- if { $printclicks == 1 } {
- set pc_print [format ".%08u" [__fix_num [clock clicks]]]
- } else {
- set pc_print ""
- }
-
- format "%02d:%02d:%02d$pc_print (%02d:%02d:%02d)" \
- [__fix_num [clock format $now -format "%H"]] \
- [__fix_num [clock format $now -format "%M"]] \
- [__fix_num [clock format $now -format "%S"]] \
- [expr $elapsed / 3600] \
- [expr ($elapsed % 3600) / 60] \
- [expr ($elapsed % 3600) % 60]
- }
-}
-
-proc __fix_num { num } {
- set num [string trimleft $num "0"]
- if {[string length $num] == 0} {
- set num "0"
- }
- return $num
-}
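__fix_num exists because clock format zero-pads its fields, and Tcl's expr historically treats a leading zero as octal, so "08" and "09" would be rejected by the %02d conversions in timestamp. For example:

    puts [__fix_num 08]     ;# -> 8, safe to pass to format %02d
    puts [__fix_num 000]    ;# -> 0, the all-zero case is preserved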
-
-# Add a {key,data} pair to the specified database where
-# key=filename and data=file contents.
-proc put_file { db txn flags file } {
- source ./include.tcl
-
- set fid [open $file r]
- fconfigure $fid -translation binary
- set data [read $fid]
- close $fid
-
- set ret [eval {$db put} $txn $flags {$file $data}]
- error_check_good put_file $ret 0
-}
-
-# Get a {key,data} pair from the specified database where
-# key=filename and data=file contents and then write the
-# data to the specified file.
-proc get_file { db txn flags file outfile } {
- source ./include.tcl
-
- set fid [open $outfile w]
- fconfigure $fid -translation binary
- if [catch {eval {$db get} $txn $flags {$file}} data] {
- puts -nonewline $fid $data
- } else {
- # Data looks like {{key data}}
- set data [lindex [lindex $data 0] 1]
- puts -nonewline $fid $data
- }
- close $fid
-}
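A hedged round-trip sketch for the two helpers above, run with no transaction or flags; the file names are placeholders:

    set db [berkdb_open -create -btree files.db]
    put_file $db "" "" ./include.tcl                    ;# key = name, data = contents
    get_file $db "" "" ./include.tcl ./include.copy     ;# write contents back out
    $db close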
-
-# Add a {key,data} pair to the specified database where
-# key=file contents and data=file name.
-proc put_file_as_key { db txn flags file } {
- source ./include.tcl
-
- set fid [open $file r]
- fconfigure $fid -translation binary
- set filecont [read $fid]
- close $fid
-
-	# Use the file name concatenated with the file contents as the
-	# key, rather than the contents alone, to ensure uniqueness.
- set data $file$filecont
-
- set ret [eval {$db put} $txn $flags {$data $file}]
- error_check_good put_file $ret 0
-}
-
-# Get a {key,data} pair from the specified database where
-# key=file contents and data=file name
-proc get_file_as_key { db txn flags file} {
- source ./include.tcl
-
- set fid [open $file r]
- fconfigure $fid -translation binary
- set filecont [read $fid]
- close $fid
-
- set data $file$filecont
-
- return [eval {$db get} $txn $flags {$data}]
-}
-
-# Open the file and call dump_file to dump the keys to a temp file.
-proc open_and_dump_file {
- dbname env outfile checkfunc dump_func beg cont } {
- global encrypt
- global passwd
- source ./include.tcl
-
- set encarg ""
- if { $encrypt > 0 && $env == "NULL" } {
- set encarg "-encryptany $passwd"
- }
- set envarg ""
- set txn ""
- set txnenv 0
- if { $env != "NULL" } {
- append envarg " -env $env "
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append envarg " -auto_commit "
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- }
- set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
- $dump_func $db $txn $outfile $checkfunc $beg $cont
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
-
-# Open the file and call dump_file to dump the keys to a temp file.
-proc open_and_dump_subfile {
- dbname env outfile checkfunc dump_func beg cont subdb} {
- global encrypt
- global passwd
- source ./include.tcl
-
- set encarg ""
- if { $encrypt > 0 && $env == "NULL" } {
- set encarg "-encryptany $passwd"
- }
- set envarg ""
- set txn ""
- set txnenv 0
- if { $env != "NULL" } {
- append envarg "-env $env"
- set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append envarg " -auto_commit "
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- }
- set db [eval {berkdb open -rdonly -unknown} \
- $envarg $encarg {$dbname $subdb}]
- error_check_good dbopen [is_valid_db $db] TRUE
- $dump_func $db $txn $outfile $checkfunc $beg $cont
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-}
-
-# Sequentially read a file and call checkfunc on each key/data pair.
-# Dump the keys out to the file specified by outfile.
-proc dump_file { db txn outfile checkfunc } {
- source ./include.tcl
-
- dump_file_direction $db $txn $outfile $checkfunc "-first" "-next"
-}
-
-proc dump_file_direction { db txn outfile checkfunc start continue } {
- source ./include.tcl
-
- # Now we will get each key from the DB and dump to outfile
- set c [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $c $db] TRUE
- dump_file_walk $c $outfile $checkfunc $start $continue
- error_check_good curs_close [$c close] 0
-}
-
-proc dump_file_walk { c outfile checkfunc start continue {flag ""} } {
- set outf [open $outfile w]
- for {set d [eval {$c get} $flag $start] } \
- { [llength $d] != 0 } \
- {set d [eval {$c get} $flag $continue] } {
- set kd [lindex $d 0]
- set k [lindex $kd 0]
- set d2 [lindex $kd 1]
- $checkfunc $k $d2
- puts $outf $k
- # XXX: Geoff Mainland
- # puts $outf "$k $d2"
- }
- close $outf
-}
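Taken together, dump_file, dump_file_direction, and dump_file_walk separate the traversal policy (direction flags) from the cursor-walk mechanism. A minimal usage sketch, relying on the nop check function defined later in this file:

    set db [berkdb_open -rdonly -unknown mydb.db]
    dump_file $db "" keys.out nop    ;# one key per line lands in keys.out
    $db close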
-
-proc dump_binkey_file { db txn outfile checkfunc } {
- source ./include.tcl
-
- dump_binkey_file_direction $db $txn $outfile $checkfunc \
- "-first" "-next"
-}
-proc dump_bin_file { db txn outfile checkfunc } {
- source ./include.tcl
-
- dump_bin_file_direction $db $txn $outfile $checkfunc "-first" "-next"
-}
-
-# Note: the following procedure assumes that the binary-file-as-keys were
-# inserted into the database by put_file_as_key, and consist of the file
-# name followed by the file contents as key, to ensure uniqueness.
-proc dump_binkey_file_direction { db txn outfile checkfunc begin cont } {
- source ./include.tcl
-
- set d1 $testdir/d1
-
- set outf [open $outfile w]
-
- # Now we will get each key from the DB and dump to outfile
- set c [eval {$db cursor} $txn]
- error_check_good db_cursor [is_valid_cursor $c $db] TRUE
-
- set inf $d1
- for {set d [$c get $begin] } { [llength $d] != 0 } \
- {set d [$c get $cont] } {
- set kd [lindex $d 0]
- set keyfile [lindex $kd 0]
- set data [lindex $kd 1]
-
- set ofid [open $d1 w]
- fconfigure $ofid -translation binary
-
- # Chop off the first few bytes--that's the file name,
- # added for uniqueness in put_file_as_key, which we don't
- # want in the regenerated file.
- set namelen [string length $data]
- set keyfile [string range $keyfile $namelen end]
- puts -nonewline $ofid $keyfile
- close $ofid
-
- $checkfunc $data $d1
- puts $outf $data
- flush $outf
- }
- close $outf
- error_check_good curs_close [$c close] 0
- fileremove $d1
-}
-
-proc dump_bin_file_direction { db txn outfile checkfunc begin cont } {
- source ./include.tcl
-
- set d1 $testdir/d1
-
- set outf [open $outfile w]
-
- # Now we will get each key from the DB and dump to outfile
- set c [eval {$db cursor} $txn]
-
- for {set d [$c get $begin] } \
- { [llength $d] != 0 } {set d [$c get $cont] } {
- set k [lindex [lindex $d 0] 0]
- set data [lindex [lindex $d 0] 1]
- set ofid [open $d1 w]
- fconfigure $ofid -translation binary
- puts -nonewline $ofid $data
- close $ofid
-
- $checkfunc $k $d1
- puts $outf $k
- }
- close $outf
- error_check_good curs_close [$c close] 0
- fileremove -f $d1
-}
-
-proc make_data_str { key } {
- set datastr ""
- for {set i 0} {$i < 10} {incr i} {
- append datastr $key
- }
- return $datastr
-}
-
-proc error_check_bad { func result bad {txn 0}} {
- if { [binary_compare $result $bad] == 0 } {
- if { $txn != 0 } {
- $txn abort
- }
- flush stdout
- flush stderr
- error "FAIL:[timestamp] $func returned error value $bad"
- }
-}
-
-proc error_check_good { func result desired {txn 0} } {
- if { [binary_compare $desired $result] != 0 } {
- if { $txn != 0 } {
- $txn abort
- }
- flush stdout
- flush stderr
- error "FAIL:[timestamp]\
- $func: expected $desired, got $result"
- }
-}
-
-# Locks have the prefix of their manager.
-proc is_substr { str sub } {
- if { [string first $sub $str] == -1 } {
- return 0
- } else {
- return 1
- }
-}
-
-proc is_serial { str } {
- global serial_tests
-
- foreach test $serial_tests {
- if { [is_substr $str $test] == 1 } {
- return 1
- }
- }
- return 0
-}
-
-proc release_list { l } {
-
- # Now release all the locks
- foreach el $l {
- catch { $el put } ret
- error_check_good lock_put $ret 0
- }
-}
-
-proc debug { {stop 0} } {
- global __debug_on
- global __debug_print
- global __debug_test
-
- set __debug_on 1
- set __debug_print 1
- set __debug_test $stop
-}
-
-# Check if each key appears exactly [llength dlist] times in the file with
-# the duplicate tags matching those that appear in dlist.
-proc dup_check { db txn tmpfile dlist {extra 0}} {
- source ./include.tcl
-
- set outf [open $tmpfile w]
- # Now we will get each key from the DB and dump to outfile
- set c [eval {$db cursor} $txn]
- set lastkey ""
- set done 0
- while { $done != 1} {
- foreach did $dlist {
- set rec [$c get "-next"]
- if { [string length $rec] == 0 } {
- set done 1
- break
- }
- set key [lindex [lindex $rec 0] 0]
- set fulldata [lindex [lindex $rec 0] 1]
- set id [id_of $fulldata]
- set d [data_of $fulldata]
- if { [string compare $key $lastkey] != 0 && \
- $id != [lindex $dlist 0] } {
- set e [lindex $dlist 0]
- error "FAIL: \tKey \
- $key, expected dup id $e, got $id"
- }
- error_check_good dupget.data $d $key
- error_check_good dupget.id $id $did
- set lastkey $key
- }
- #
-		# Some tests add an extra dup (like overflow entries);
-		# check its id if it exists.
- if { $extra != 0} {
- set okey $key
- set rec [$c get "-next"]
- if { [string length $rec] != 0 } {
- set key [lindex [lindex $rec 0] 0]
- #
- # If this key has no extras, go back for
- # next iteration.
- if { [string compare $key $lastkey] != 0 } {
- set key $okey
- set rec [$c get "-prev"]
- } else {
- set fulldata [lindex [lindex $rec 0] 1]
- set id [id_of $fulldata]
- set d [data_of $fulldata]
- error_check_bad dupget.data1 $d $key
- error_check_good dupget.id1 $id $extra
- }
- }
- }
- if { $done != 1 } {
- puts $outf $key
- }
- }
- close $outf
- error_check_good curs_close [$c close] 0
-}
-
-# Check if each key appears exactly [llength dlist] times in the file with
-# the duplicate tags matching those that appear in dlist.
-proc dup_file_check { db txn tmpfile dlist } {
- source ./include.tcl
-
- set outf [open $tmpfile w]
- # Now we will get each key from the DB and dump to outfile
- set c [eval {$db cursor} $txn]
- set lastkey ""
- set done 0
- while { $done != 1} {
- foreach did $dlist {
- set rec [$c get "-next"]
- if { [string length $rec] == 0 } {
- set done 1
- break
- }
- set key [lindex [lindex $rec 0] 0]
- if { [string compare $key $lastkey] != 0 } {
- #
- # If we changed files read in new contents.
- #
- set fid [open $key r]
- fconfigure $fid -translation binary
- set filecont [read $fid]
- close $fid
- }
- set fulldata [lindex [lindex $rec 0] 1]
- set id [id_of $fulldata]
- set d [data_of $fulldata]
- if { [string compare $key $lastkey] != 0 && \
- $id != [lindex $dlist 0] } {
- set e [lindex $dlist 0]
- error "FAIL: \tKey \
- $key, expected dup id $e, got $id"
- }
- error_check_good dupget.data $d $filecont
- error_check_good dupget.id $id $did
- set lastkey $key
- }
- if { $done != 1 } {
- puts $outf $key
- }
- }
- close $outf
- error_check_good curs_close [$c close] 0
-}
-
-# Parse duplicate data entries of the form N:data. Data_of returns
-# the data part; id_of returns the numerical part
-proc data_of {str} {
- set ndx [string first ":" $str]
- if { $ndx == -1 } {
- return ""
- }
- return [ string range $str [expr $ndx + 1] end]
-}
-
-proc id_of {str} {
- set ndx [string first ":" $str]
- if { $ndx == -1 } {
- return ""
- }
-
- return [ string range $str 0 [expr $ndx - 1]]
-}
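For example, with the N:data convention:

    puts [id_of 3:apple]       ;# -> 3
    puts [data_of 3:apple]     ;# -> apple
    puts [id_of noseparator]   ;# -> empty string (no ":" present)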
-
-proc nop { {args} } {
- return
-}
-
-# Partial put test procedure.
-# Munges a data val through three different partial puts. Stores
-# the final munged string in the dvals array so that you can check
-# it later (dvals should be global). We take the characters that
-# are being replaced, make them capitals and then replicate them
-# some number of times (n_add). We do this at the beginning of the
-# data, at the middle and at the end. The parameters are:
-# db, txn, key -- as per usual. Data is the original data element
-# from which we are starting. n_replace is the number of characters
-# that we will replace. n_add is the number of times we will add
-# the replaced string back in.
-proc partial_put { method db txn gflags key data n_replace n_add } {
- global dvals
- source ./include.tcl
-
- # Here is the loop where we put and get each key/data pair
- # We will do the initial put and then three Partial Puts
- # for the beginning, middle and end of the string.
-
- eval {$db put} $txn {$key [chop_data $method $data]}
-
- # Beginning change
- set s [string range $data 0 [ expr $n_replace - 1 ] ]
- set repl [ replicate [string toupper $s] $n_add ]
-
- # This is gross, but necessary: if this is a fixed-length
- # method, and the chopped length of $repl is zero,
- # it's because the original string was zero-length and our data item
- # is all nulls. Set repl to something non-NULL.
- if { [is_fixed_length $method] && \
- [string length [chop_data $method $repl]] == 0 } {
- set repl [replicate "." $n_add]
- }
-
- set newstr [chop_data $method $repl[string range $data $n_replace end]]
- set ret [eval {$db put} $txn {-partial [list 0 $n_replace] \
- $key [chop_data $method $repl]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $gflags $txn {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
-
- # End Change
- set len [string length $newstr]
- set spl [expr $len - $n_replace]
- # Handle case where $n_replace > $len
- if { $spl < 0 } {
- set spl 0
- }
-
- set s [string range $newstr [ expr $len - $n_replace ] end ]
- # Handle zero-length keys
- if { [string length $s] == 0 } { set s "A" }
-
- set repl [ replicate [string toupper $s] $n_add ]
- set newstr [chop_data $method \
- [string range $newstr 0 [expr $spl - 1 ] ]$repl]
-
- set ret [eval {$db put} $txn \
- {-partial [list $spl $n_replace] $key [chop_data $method $repl]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $gflags $txn {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
-
- # Middle Change
- set len [string length $newstr]
- set mid [expr $len / 2 ]
- set beg [expr $mid - [expr $n_replace / 2] ]
- set end [expr $beg + $n_replace - 1]
- set s [string range $newstr $beg $end]
- set repl [ replicate [string toupper $s] $n_add ]
- set newstr [chop_data $method [string range $newstr 0 \
- [expr $beg - 1 ] ]$repl[string range $newstr [expr $end + 1] end]]
-
- set ret [eval {$db put} $txn {-partial [list $beg $n_replace] \
- $key [chop_data $method $repl]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $gflags $txn {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
-
- set dvals($key) [pad_data $method $newstr]
-}
-
-proc replicate { str times } {
- set res $str
- for { set i 1 } { $i < $times } { set i [expr $i * 2] } {
- append res $res
- }
- return $res
-}
-
-proc repeat { str n } {
- set ret ""
- while { $n > 0 } {
- set ret $str$ret
- incr n -1
- }
- return $ret
-}
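Note that the two helpers grow differently: replicate doubles the accumulated string, so it yields the smallest power-of-two number of copies at or above times, while repeat is exact:

    puts [string length [replicate a 3]]    ;# -> 4 (doubling overshoots)
    puts [repeat ab 3]                      ;# -> ababab (exactly n copies)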
-
-proc isqrt { l } {
- set s [expr sqrt($l)]
- set ndx [expr [string first "." $s] - 1]
- return [string range $s 0 $ndx]
-}
-
-# If we run watch_procs multiple times without an intervening
-# testdir cleanup, it's possible that old sentinel files will confuse
-# us. Make sure they're wiped out before we spawn any other processes.
-proc sentinel_init { } {
- source ./include.tcl
-
- set filelist {}
- set ret [catch {glob $testdir/begin.*} result]
- if { $ret == 0 } {
- set filelist $result
- }
-
- set ret [catch {glob $testdir/end.*} result]
- if { $ret == 0 } {
- set filelist [concat $filelist $result]
- }
-
- foreach f $filelist {
- fileremove $f
- }
-}
-
-proc watch_procs { pidlist {delay 30} {max 3600} {quiet 0} } {
- source ./include.tcl
-
- set elapsed 0
-
- # Don't start watching the processes until a sentinel
- # file has been created for each one.
- foreach pid $pidlist {
- while { [file exists $testdir/begin.$pid] == 0 } {
- tclsleep $delay
- incr elapsed $delay
- # If pids haven't been created in one-tenth
- # of the time allowed for the whole test,
- # there's a problem. Report an error and fail.
- if { $elapsed > [expr {$max / 10}] } {
- puts "FAIL: begin.pid not created"
- break
- }
- }
- }
-
- while { 1 } {
-
- tclsleep $delay
- incr elapsed $delay
-
- # Find the list of processes with outstanding sentinel
- # files (i.e. a begin.pid and no end.pid).
- set beginlist {}
- set endlist {}
- set ret [catch {glob $testdir/begin.*} result]
- if { $ret == 0 } {
- set beginlist $result
- }
- set ret [catch {glob $testdir/end.*} result]
- if { $ret == 0 } {
- set endlist $result
- }
-
- set bpids {}
- catch {unset epids}
- foreach begfile $beginlist {
- lappend bpids [string range $begfile \
- [string length $testdir/begin.] end]
- }
- foreach endfile $endlist {
- set epids([string range $endfile \
- [string length $testdir/end.] end]) 1
- }
-
- # The set of processes that we still want to watch, $l,
- # is the set of pids that have begun but not ended
- # according to their sentinel files.
- set l {}
- foreach p $bpids {
- if { [info exists epids($p)] == 0 } {
- lappend l $p
- }
- }
-
- set rlist {}
- foreach i $l {
- set r [ catch { exec $KILL -0 $i } result ]
- if { $r == 0 } {
- lappend rlist $i
- }
- }
- if { [ llength $rlist] == 0 } {
- break
- } else {
- puts "[timestamp] processes running: $rlist"
- }
-
- if { $elapsed > $max } {
- # We have exceeded the limit; kill processes
- # and report an error
- foreach i $l {
- tclkill $i
- }
- }
- }
- if { $quiet == 0 } {
- puts "All processes have exited."
- }
-}
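watch_procs sees only the parent side of the sentinel protocol; a spawned test script is expected to create the matching files itself. A hedged sketch of that child-side half, assuming $testdir is set as in include.tcl:

    # Inside a spawned test script (sketch): announce start and exit.
    set pid [pid]
    set f [open $testdir/begin.$pid w]
    close $f
    # ... run the actual test body ...
    set f [open $testdir/end.$pid w]
    close $f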
-
-# These routines are all used from within the dbscript.tcl tester.
-proc db_init { dbp do_data } {
- global a_keys
- global l_keys
- source ./include.tcl
-
- set txn ""
- set nk 0
- set lastkey ""
-
- set a_keys() BLANK
- set l_keys ""
-
- set c [$dbp cursor]
- for {set d [$c get -first] } { [llength $d] != 0 } {
- set d [$c get -next] } {
- set k [lindex [lindex $d 0] 0]
- set d2 [lindex [lindex $d 0] 1]
- incr nk
- if { $do_data == 1 } {
- if { [info exists a_keys($k)] } {
-				lappend a_keys($k) $d2
- } else {
- set a_keys($k) $d2
- }
- }
-
- lappend l_keys $k
- }
- error_check_good curs_close [$c close] 0
-
- return $nk
-}
-
-proc pick_op { min max n } {
- if { $n == 0 } {
- return add
- }
-
- set x [berkdb random_int 1 12]
- if {$n < $min} {
- if { $x <= 4 } {
- return put
- } elseif { $x <= 8} {
- return get
- } else {
- return add
- }
- } elseif {$n > $max} {
- if { $x <= 4 } {
- return put
- } elseif { $x <= 8 } {
- return get
- } else {
- return del
- }
-
- } elseif { $x <= 3 } {
- return del
- } elseif { $x <= 6 } {
- return get
- } elseif { $x <= 9 } {
- return put
- } else {
- return add
- }
-}
-
-# random_data: Generate a string of random characters.
-# If recno is 0 - Use average to pick a length between 1 and 2 * avg.
-# If recno is non-0, generate a number between 1 and 2 ^ (avg * 2 - 1),
-# that will fit into a 32-bit integer.
-# If the unique flag is 1, then make sure that the string is unique
-# in the array "where".
-proc random_data { avg unique where {recno 0} } {
- upvar #0 $where arr
- global debug_on
- set min 1
- set max [expr $avg+$avg-1]
- if { $recno } {
- #
- # Tcl seems to have problems with values > 30.
- #
- if { $max > 30 } {
- set max 30
- }
- set maxnum [expr int(pow(2, $max))]
- }
- while {1} {
- set len [berkdb random_int $min $max]
- set s ""
- if {$recno} {
- set s [berkdb random_int 1 $maxnum]
- } else {
- for {set i 0} {$i < $len} {incr i} {
- append s [int_to_char [berkdb random_int 0 25]]
- }
- }
-
- if { $unique == 0 || [info exists arr($s)] == 0 } {
- break
- }
- }
-
- return $s
-}
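A hedged usage example for random_data: ten unique five-character-average strings, tracked in a caller-side global array (berkdb must be loaded, since random_data uses berkdb random_int):

    array set seen {}
    for {set i 0} {$i < 10} {incr i} {
        set s [random_data 5 1 seen]
        set seen($s) 1    ;# record it so the uniqueness check holds next time
    }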
-
-proc random_key { } {
- global l_keys
- global nkeys
- set x [berkdb random_int 0 [expr $nkeys - 1]]
- return [lindex $l_keys $x]
-}
-
-proc is_err { desired } {
- set x [berkdb random_int 1 100]
- if { $x <= $desired } {
- return 1
- } else {
- return 0
- }
-}
-
-proc pick_cursput { } {
- set x [berkdb random_int 1 4]
- switch $x {
- 1 { return "-keylast" }
- 2 { return "-keyfirst" }
- 3 { return "-before" }
- 4 { return "-after" }
- }
-}
-
-proc random_cursor { curslist } {
- global l_keys
- global nkeys
-
- set x [berkdb random_int 0 [expr [llength $curslist] - 1]]
- set dbc [lindex $curslist $x]
-
- # We want to randomly set the cursor. Pick a key.
- set k [random_key]
- set r [$dbc get "-set" $k]
-	error_check_good cursor_get:$k [is_substr $r Error] 0
-
- # Now move forward or backward some hops to randomly
- # position the cursor.
- set dist [berkdb random_int -10 10]
-
- set dir "-next"
- set boundary "-first"
- if { $dist < 0 } {
- set dir "-prev"
- set boundary "-last"
- set dist [expr 0 - $dist]
- }
-
- for { set i 0 } { $i < $dist } { incr i } {
-		set r [ record $dbc get $dir ]
-		if { [llength $r] == 0 } {
-			set r [ record $dbc get $boundary ]
-		}
-		error_check_bad dbcget [llength $r] 0
-	}
-	return [linsert $r 0 $dbc]
-}
-
-proc record { args } {
-# Recording every operation makes tests ridiculously slow on
-# NT, so we are commenting this out; for debugging purposes,
-# it will undoubtedly be useful to uncomment this.
-# puts $args
-# flush stdout
- return [eval $args]
-}
-
-proc newpair { k data } {
- global l_keys
- global a_keys
- global nkeys
-
- set a_keys($k) $data
- lappend l_keys $k
- incr nkeys
-}
-
-proc rempair { k } {
- global l_keys
- global a_keys
- global nkeys
-
- unset a_keys($k)
- set n [lsearch $l_keys $k]
- error_check_bad rempair:$k $n -1
- set l_keys [lreplace $l_keys $n $n]
- incr nkeys -1
-}
-
-proc changepair { k data } {
- global l_keys
- global a_keys
- global nkeys
-
- set a_keys($k) $data
-}
-
-proc changedup { k olddata newdata } {
- global l_keys
- global a_keys
- global nkeys
-
- set d $a_keys($k)
- error_check_bad changedup:$k [llength $d] 0
-
- set n [lsearch $d $olddata]
- error_check_bad changedup:$k $n -1
-
- set a_keys($k) [lreplace $a_keys($k) $n $n $newdata]
-}
-
-# Insert a dup into the a_keys array with DB_KEYFIRST.
-proc adddup { k olddata newdata } {
- global l_keys
- global a_keys
- global nkeys
-
- set d $a_keys($k)
- if { [llength $d] == 0 } {
- lappend l_keys $k
- incr nkeys
-		set a_keys($k) [list $newdata]
- }
-
- set ndx 0
-
-	set d [linsert $d $ndx $newdata]
- set a_keys($k) $d
-}
-
-proc remdup { k data } {
- global l_keys
- global a_keys
- global nkeys
-
-	set d $a_keys($k)
-	error_check_bad remdup:$k [llength $d] 0
-
-	set n [lsearch $d $data]
-	error_check_bad remdup:$k $n -1
-
- set a_keys($k) [lreplace $a_keys($k) $n $n]
-}
-
-proc dump_full_file { db txn outfile checkfunc start continue } {
- source ./include.tcl
-
- set outf [open $outfile w]
- # Now we will get each key from the DB and dump to outfile
- set c [eval {$db cursor} $txn]
- error_check_good dbcursor [is_valid_cursor $c $db] TRUE
-
- for {set d [$c get $start] } { [string length $d] != 0 } {
- set d [$c get $continue] } {
- set k [lindex [lindex $d 0] 0]
- set d2 [lindex [lindex $d 0] 1]
- $checkfunc $k $d2
- puts $outf "$k\t$d2"
- }
- close $outf
- error_check_good curs_close [$c close] 0
-}
-
-proc int_to_char { i } {
- global alphabet
-
- return [string index $alphabet $i]
-}
-
-proc dbcheck { key data } {
- global l_keys
- global a_keys
- global nkeys
- global check_array
-
- if { [lsearch $l_keys $key] == -1 } {
- error "FAIL: Key |$key| not in list of valid keys"
- }
-
- set d $a_keys($key)
-
- if { [info exists check_array($key) ] } {
- set check $check_array($key)
- } else {
- set check {}
- }
-
- if { [llength $d] > 1 } {
- if { [llength $check] != [llength $d] } {
- # Make the check array the right length
- for { set i [llength $check] } { $i < [llength $d] } \
- {incr i} {
- lappend check 0
- }
- set check_array($key) $check
- }
-
- # Find this data's index
- set ndx [lsearch $d $data]
- if { $ndx == -1 } {
- error "FAIL: \
- Data |$data| not found for key $key. Found |$d|"
- }
-
- # Set the bit in the check array
- set check_array($key) [lreplace $check_array($key) $ndx $ndx 1]
- } elseif { [string compare $d $data] != 0 } {
- error "FAIL: \
- Invalid data |$data| for key |$key|. Expected |$d|."
- } else {
- set check_array($key) 1
- }
-}
-
-# Dump out the file and verify it
-proc filecheck { file txn } {
- global check_array
- global l_keys
- global nkeys
- global a_keys
- source ./include.tcl
-
- if { [info exists check_array] == 1 } {
- unset check_array
- }
-
- open_and_dump_file $file NULL $file.dump dbcheck dump_full_file \
- "-first" "-next"
-
- # Check that everything we checked had all its data
- foreach i [array names check_array] {
- set count 0
- foreach j $check_array($i) {
- if { $j != 1 } {
- puts -nonewline "Key |$i| never found datum"
- puts " [lindex $a_keys($i) $count]"
- }
- incr count
- }
- }
-
- # Check that all keys appeared in the checked array
- set count 0
- foreach k $l_keys {
- if { [info exists check_array($k)] == 0 } {
- puts "filecheck: key |$k| not found. Data: $a_keys($k)"
- }
- incr count
- }
-
- if { $count != $nkeys } {
- puts "filecheck: Got $count keys; expected $nkeys"
- }
-}
-
-proc cleanup { dir env { quiet 0 } } {
- global gen_upgrade
- global is_qnx_test
- global is_je_test
- global old_encrypt
- global passwd
- source ./include.tcl
-
- if { $gen_upgrade == 1 } {
- save_upgrade_files $dir
- }
-
-# check_handles
- set remfiles {}
- set ret [catch { glob $dir/* } result]
- if { $ret == 0 } {
- foreach fileorig $result {
- #
- # We:
- # - Ignore any env-related files, which are
- # those that have __db.* or log.* if we are
- # running in an env. Also ignore files whose
- # names start with REPDIR_; these are replication
- # subdirectories.
- # - Call 'dbremove' on any databases.
- # Remove any remaining temp files.
- #
- switch -glob -- $fileorig {
- */DIR_* -
- */__db.* -
- */log.* -
- */*.jdb {
- if { $env != "NULL" } {
- continue
- } else {
- if { $is_qnx_test } {
- catch {berkdb envremove -force \
- -home $dir} r
- }
- lappend remfiles $fileorig
- }
- }
- *.db {
- set envargs ""
- set encarg ""
- #
- # If in an env, it should be open crypto
- # or not already.
- #
- if { $env != "NULL"} {
- set file [file tail $fileorig]
- set envargs " -env $env "
- if { [is_txnenv $env] } {
- append envargs " -auto_commit "
- }
- } else {
- if { $old_encrypt != 0 } {
- set encarg "-encryptany $passwd"
- }
- set file $fileorig
- }
-
- # If a database is left in a corrupt
- # state, dbremove might not be able to handle
- # it (it does an open before the remove).
- # Be prepared for this, and if necessary,
- # just forcibly remove the file with a warning
- # message.
- set ret [catch \
- {eval {berkdb dbremove} $envargs $encarg \
- $file} res]
- # If dbremove failed and we're not in an env,
- # note that we don't have 100% certainty
- # about whether the previous run used
- # encryption. Try to remove with crypto if
- # we tried without, and vice versa.
- if { $ret != 0 } {
- if { $env == "NULL" && \
- $old_encrypt == 0} {
- set ret [catch \
- {eval {berkdb dbremove} \
- -encryptany $passwd \
- $file} res]
- }
- if { $env == "NULL" && \
- $old_encrypt == 1 } {
- set ret [catch \
- {eval {berkdb dbremove} \
- $file} res]
- }
- if { $ret != 0 } {
- if { $quiet == 0 } {
- puts \
- "FAIL: dbremove in cleanup failed: $res"
- }
- set file $fileorig
- lappend remfiles $file
- }
- }
- }
- default {
- lappend remfiles $fileorig
- }
- }
- }
- if {[llength $remfiles] > 0} {
- #
- # In the HFS file system there are cases where not
- # all files are removed on the first attempt. If
- # it fails, try again a few times.
- #
- set count 0
- while { [catch {eval fileremove -f $remfiles}] == 1 \
- && $count < 5 } {
- incr count
- }
- }
-
- if { $is_je_test } {
- set rval [catch {eval {exec \
- $util_path/db_dump} -h $dir -l } res]
- if { $rval == 0 } {
- set envargs " -env $env "
- if { [is_txnenv $env] } {
- append envargs " -auto_commit "
- }
-
- foreach db $res {
- set ret [catch {eval \
- {berkdb dbremove} $envargs $db } res]
- }
- }
- }
- }
-}
-
-proc log_cleanup { dir } {
- source ./include.tcl
- global gen_upgrade_log
-
- if { $gen_upgrade_log == 1 } {
- save_upgrade_files $dir
- }
-
- set files [glob -nocomplain $dir/log.*]
- if { [llength $files] != 0} {
- foreach f $files {
- fileremove -f $f
- }
- }
-}
-
-proc env_cleanup { dir } {
- global old_encrypt
- global passwd
- source ./include.tcl
-
- set encarg ""
- if { $old_encrypt != 0 } {
- set encarg "-encryptany $passwd"
- }
- set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret]
- #
- # If something failed and we are left with a region entry
- # in /dev/shmem that is zero-length, the envremove will
- # succeed, and the shm_unlink will succeed, but it will not
- # remove the zero-length entry from /dev/shmem. Remove it
- # using fileremove or else all other tests using an env
- # will immediately fail.
- #
- if { $is_qnx_test == 1 } {
- set region_files [glob -nocomplain /dev/shmem/$dir*]
- if { [llength $region_files] != 0 } {
- foreach f $region_files {
- fileremove -f $f
- }
- }
- }
- log_cleanup $dir
- cleanup $dir NULL
-}
-
-# Start an RPC server. Don't return to caller until the
-# server is up. Wait up to $maxwait seconds.
-proc rpc_server_start { { encrypted 0 } { maxwait 30 } { args "" } } {
- source ./include.tcl
- global rpc_svc
- global passwd
-
- set encargs ""
- if { $encrypted == 1 } {
- set encargs " -P $passwd "
- }
-
- if { [string compare $rpc_server "localhost"] == 0 } {
- set dpid [eval {exec $util_path/$rpc_svc \
- -h $rpc_testdir} $args $encargs &]
- } else {
- set dpid [eval {exec rsh $rpc_server \
- $rpc_path/$rpc_svc -h $rpc_testdir $args} &]
- }
-
- # Wait a couple of seconds before we start looking for
- # the server.
- tclsleep 2
- set home [file tail $rpc_testdir]
- if { $encrypted == 1 } {
- set encargs " -encryptaes $passwd "
- }
- for { set i 0 } { $i < $maxwait } { incr i } {
- # Try an operation -- while it fails with NOSERVER, sleep for
- # a second and retry.
- if {[catch {berkdb envremove -force -home "$home.FAIL" \
- -server $rpc_server} res] && \
- [is_substr $res DB_NOSERVER:]} {
- tclsleep 1
- } else {
- # Server is up, clean up and return to caller
- break
- }
- if { $i >= $maxwait } {
- puts "FAIL: RPC server\
- not started after $maxwait seconds"
- }
- }
- return $dpid
-}
-
-proc remote_cleanup { server dir localdir } {
- set home [file tail $dir]
- error_check_good cleanup:remove [berkdb envremove -home $home \
- -server $server] 0
- catch {exec rsh $server rm -f $dir/*} ret
- cleanup $localdir NULL
-}
-
-proc help { cmd } {
- if { [info command $cmd] == $cmd } {
- set is_proc [lsearch [info procs $cmd] $cmd]
- if { $is_proc == -1 } {
- # Not a procedure; must be a C command
- # Let's hope that it takes some parameters
- # and that it prints out a message
- puts "Usage: [eval $cmd]"
- } else {
- # It is a tcl procedure
- puts -nonewline "Usage: $cmd"
- set args [info args $cmd]
- foreach a $args {
- set is_def [info default $cmd $a val]
- if { $is_def != 0 } {
- # Default value
- puts -nonewline " $a=$val"
- } elseif {$a == "args"} {
- # Print out flag values
- puts " options"
- args
- } else {
- # No default value
- puts -nonewline " $a"
- }
- }
- puts ""
- }
- } else {
- puts "$cmd is not a command"
- }
-}
-
-# Run a recovery test for a particular operation
-# Notice that we catch the return from CP and do not do anything with it.
-# This is because Solaris CP seems to exit non-zero on occasion, but
-# everything else seems to run just fine.
-#
-# We split it into two functions so that the preparation and command
-# could be executed in a different process than the recovery.
-#
-proc op_codeparse { encodedop op } {
- set op1 ""
- set op2 ""
- switch $encodedop {
- "abort" {
- set op1 $encodedop
- set op2 ""
- }
- "commit" {
- set op1 $encodedop
- set op2 ""
- }
- "prepare-abort" {
- set op1 "prepare"
- set op2 "abort"
- }
- "prepare-commit" {
- set op1 "prepare"
- set op2 "commit"
- }
- "prepare-discard" {
- set op1 "prepare"
- set op2 "discard"
- }
- }
-
- if { $op == "op" } {
- return $op1
- } else {
- return $op2
- }
-}
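For example, the compound codes split as follows:

    puts [op_codeparse prepare-abort op]     ;# -> prepare
    puts [op_codeparse prepare-abort sub]    ;# -> abort
    puts [op_codeparse commit sub]           ;# -> empty string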
-
-proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
- source ./include.tcl
-
- set op [op_codeparse $encodedop "op"]
- set op2 [op_codeparse $encodedop "sub"]
- puts "\t$msg $encodedop"
- set gidf ""
- if { $op == "prepare" } {
- sentinel_init
-
- # Fork off a child to run the cmd
- # We append the gid, so start here making sure
- # we don't have old gid's around.
- set outfile $testdir/childlog
- fileremove -f $testdir/gidfile
- set gidf $testdir/gidfile
- set pidlist {}
- # puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \
- # $op $dir $env_cmd $dbfile $gidf $cmd"
- set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
- $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &]
- lappend pidlist $p
- watch_procs $pidlist 5
- set f1 [open $testdir/recdout r]
- set r [read $f1]
- puts -nonewline $r
- close $f1
- fileremove -f $testdir/recdout
- } else {
- op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd
- }
- op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf
-}
-
-proc op_recover_prep { op dir env_cmd dbfile gidf cmd } {
- global log_log_record_types
- global recd_debug
- global recd_id
- global recd_op
- source ./include.tcl
-
- #puts "op_recover: $op $dir $env $dbfile $cmd"
-
- set init_file $dir/t1
- set afterop_file $dir/t2
- set final_file $dir/t3
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $dir
- }
-
- # Save the initial file and open the environment and the file
- catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
- copy_extent_file $dir $dbfile init
-
- convert_encrypt $env_cmd
- set env [eval $env_cmd]
- error_check_good envopen [is_valid_env $env] TRUE
-
- set db [berkdb open -auto_commit -env $env $dbfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Dump out file contents for initial case
- open_and_dump_file $dbfile $env $init_file nop \
- dump_file_direction "-first" "-next"
-
- set t [$env txn]
- error_check_bad txn_begin $t NULL
- error_check_good txn_begin [is_substr $t "txn"] 1
-
- # Now fill in the db, tmgr, and the txnid in the command
- set exec_cmd $cmd
-
- set i [lsearch $cmd ENV]
- if { $i != -1 } {
- set exec_cmd [lreplace $exec_cmd $i $i $env]
- }
-
- set i [lsearch $cmd TXNID]
- if { $i != -1 } {
- set exec_cmd [lreplace $exec_cmd $i $i $t]
- }
-
- set i [lsearch $exec_cmd DB]
- if { $i != -1 } {
- set exec_cmd [lreplace $exec_cmd $i $i $db]
- }
-
- # To test DB_CONSUME, we need to expect a record return, not "0".
- set i [lsearch $exec_cmd "-consume"]
- if { $i != -1 } {
- set record_exec_cmd_ret 1
- } else {
- set record_exec_cmd_ret 0
- }
-
- # For the DB_APPEND test, we need to expect a return other than
- # 0; set this flag to be more lenient in the error_check_good.
- set i [lsearch $exec_cmd "-append"]
- if { $i != -1 } {
- set lenient_exec_cmd_ret 1
- } else {
- set lenient_exec_cmd_ret 0
- }
-
- # Execute command and commit/abort it.
- set ret [eval $exec_cmd]
- if { $record_exec_cmd_ret == 1 } {
- error_check_good "\"$exec_cmd\"" [llength [lindex $ret 0]] 2
- } elseif { $lenient_exec_cmd_ret == 1 } {
- error_check_good "\"$exec_cmd\"" [expr $ret > 0] 1
- } else {
- error_check_good "\"$exec_cmd\"" $ret 0
- }
-
- set record_exec_cmd_ret 0
- set lenient_exec_cmd_ret 0
-
- # Sync the file so that we can capture a snapshot to test recovery.
- error_check_good sync:$db [$db sync] 0
-
- catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
- copy_extent_file $dir $dbfile afterop
- open_and_dump_file $dir/$dbfile.afterop NULL \
- $afterop_file nop dump_file_direction "-first" "-next"
-
- #puts "\t\t\tExecuting txn_$op:$t"
- if { $op == "prepare" } {
- set gid [make_gid global:$t]
- set gfd [open $gidf w+]
- puts $gfd $gid
- close $gfd
- error_check_good txn_$op:$t [$t $op $gid] 0
- } else {
- error_check_good txn_$op:$t [$t $op] 0
- }
-
- switch $op {
- "commit" { puts "\t\tCommand executed and committed." }
- "abort" { puts "\t\tCommand executed and aborted." }
- "prepare" { puts "\t\tCommand executed and prepared." }
- }
-
- # Sync the file so that we can capture a snapshot to test recovery.
- error_check_good sync:$db [$db sync] 0
-
- catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
- copy_extent_file $dir $dbfile final
- open_and_dump_file $dir/$dbfile.final NULL \
- $final_file nop dump_file_direction "-first" "-next"
-
- # If this is an abort or prepare-abort, it should match the
- # original file.
- # If this was a commit or prepare-commit, then this file should
- # match the afterop file.
- # If this was a prepare without an abort or commit, we still
- # have transactions active, and peering at the database from
- # another environment will show data from uncommitted transactions.
- # Thus we just skip this in the prepare-only case; what
- # we care about are the results of a prepare followed by a
- # recovery, which we test later.
- if { $op == "commit" } {
- filesort $afterop_file $afterop_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
- [filecmp $afterop_file.sort $final_file.sort] 0
- } elseif { $op == "abort" } {
- filesort $init_file $init_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(initial,post-$op):diff($init_file,$final_file) \
- [filecmp $init_file.sort $final_file.sort] 0
- } else {
- # Make sure this really is one of the prepare tests
- error_check_good assert:prepare-test $op "prepare"
- }
-
- # Running recovery on this database should not do anything.
- # Flush all data to disk, close the environment and save the
- # file.
- # XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared,
- # you really have an active transaction and you're not allowed
- # to close files that are being acted upon by in-process
- # transactions.
- if { $op != "prepare" } {
- error_check_good close:$db [$db close] 0
- }
-
- #
- # If we are running 'prepare' don't close the env with an
- # active transaction. Leave it alone so the close won't
- # quietly abort it on us.
- if { [is_substr $op "prepare"] != 1 } {
- error_check_good envclose [$env close] 0
- }
- return
-}
-
-proc op_recover_rec { op op2 dir env_cmd dbfile gidf} {
- global log_log_record_types
- global recd_debug
- global recd_id
- global recd_op
- global encrypt
- global passwd
- source ./include.tcl
-
- #puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf"
-
- set init_file $dir/t1
- set afterop_file $dir/t2
- set final_file $dir/t3
-
- # Keep track of the log types we've seen
- if { $log_log_record_types == 1} {
- logtrack_read $dir
- }
-
- berkdb debug_check
- puts -nonewline "\t\top_recover_rec: Running recovery ... "
- flush stdout
-
- set recargs "-h $dir -c "
- if { $encrypt > 0 } {
- append recargs " -P $passwd "
- }
- set stat [catch {eval exec $util_path/db_recover -e $recargs} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- }
- puts -nonewline "complete ... "
-
- #
- # We cannot run db_recover here because that will open an env, run
- # recovery, then close it, which will abort the outstanding txns.
- # We want to do it ourselves.
- #
- set env [eval $env_cmd]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
- puts "verified"
-
- # If we left a txn as prepared, but not aborted or committed,
- # we need to do a txn_recover. Make sure we have the same
- # number of txns we want.
- if { $op == "prepare"} {
- set txns [$env txn_recover]
- error_check_bad txnrecover [llength $txns] 0
- set gfd [open $gidf r]
- set origgid [read -nonewline $gfd]
- close $gfd
- set txnlist [lindex $txns 0]
- set t [lindex $txnlist 0]
- set gid [lindex $txnlist 1]
- error_check_good gidcompare $gid $origgid
- puts "\t\t\tExecuting txn_$op2:$t"
- error_check_good txn_$op2:$t [$t $op2] 0
- #
- # If we are testing discard, we do need to resolve
- # the txn, so get the list again and now abort it.
- #
- if { $op2 == "discard" } {
- set txns [$env txn_recover]
- error_check_bad txnrecover [llength $txns] 0
- set txnlist [lindex $txns 0]
- set t [lindex $txnlist 0]
- set gid [lindex $txnlist 1]
- error_check_good gidcompare $gid $origgid
- puts "\t\t\tExecuting txn_abort:$t"
- error_check_good disc_txn_abort:$t [$t abort] 0
- }
- }
-
- open_and_dump_file $dir/$dbfile NULL $final_file nop \
- dump_file_direction "-first" "-next"
- if { $op == "commit" || $op2 == "commit" } {
- filesort $afterop_file $afterop_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
- [filecmp $afterop_file.sort $final_file.sort] 0
- } else {
- filesort $init_file $init_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(initial,post-$op):diff($init_file,$final_file) \
- [filecmp $init_file.sort $final_file.sort] 0
- }
-
- # Now close the environment, substitute a file that will need
- # recovery and try running recovery again.
- reset_env $env
- if { $op == "commit" || $op2 == "commit" } {
- catch { file copy -force $dir/$dbfile.init $dir/$dbfile } res
- move_file_extent $dir $dbfile init copy
- } else {
- catch { file copy -force $dir/$dbfile.afterop $dir/$dbfile } res
- move_file_extent $dir $dbfile afterop copy
- }
-
- berkdb debug_check
- puts -nonewline "\t\tRunning recovery on pre-op database ... "
- flush stdout
-
- set stat [catch {eval exec $util_path/db_recover $recargs} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- }
- puts -nonewline "complete ... "
-
- error_check_good db_verify_preop [verify_dir $testdir "\t\t" 0 1] 0
-
- puts "verified"
-
- set env [eval $env_cmd]
-
- open_and_dump_file $dir/$dbfile NULL $final_file nop \
- dump_file_direction "-first" "-next"
- if { $op == "commit" || $op2 == "commit" } {
- filesort $final_file $final_file.sort
- filesort $afterop_file $afterop_file.sort
- error_check_good \
- diff(post-$op,recovered):diff($afterop_file,$final_file) \
- [filecmp $afterop_file.sort $final_file.sort] 0
- } else {
- filesort $init_file $init_file.sort
- filesort $final_file $final_file.sort
- error_check_good \
- diff(initial,post-$op):diff($init_file,$final_file) \
- [filecmp $init_file.sort $final_file.sort] 0
- }
-
- # This should just close the environment, not blow it away.
- reset_env $env
-}
-
-proc populate { db method txn n dups bigdata } {
- source ./include.tcl
-
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $n } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } elseif { $dups == 1 } {
- set key duplicate_key
- } else {
- set key $str
- }
- if { $bigdata == 1 && [berkdb random_int 1 3] == 1} {
- set str [replicate $str 1000]
- }
-
- set ret [$db put -txn $txn $key $str]
- error_check_good db_put:$key $ret 0
- incr count
- }
- close $did
- return 0
-}
-
-proc big_populate { db txn n } {
- source ./include.tcl
-
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $n } {
- set key [replicate $str 50]
- set ret [$db put -txn $txn $key $str]
- error_check_good db_put:$key $ret 0
- incr count
- }
- close $did
- return 0
-}
-
-proc unpopulate { db txn num } {
- source ./include.tcl
-
- set c [eval {$db cursor} "-txn $txn"]
- error_check_bad $db:cursor $c NULL
- error_check_good $db:cursor [is_substr $c $db] 1
-
- set i 0
- for {set d [$c get -first] } { [llength $d] != 0 } {
- set d [$c get -next] } {
- $c del
- incr i
- if { $num != 0 && $i >= $num } {
- break
- }
- }
- error_check_good cursor_close [$c close] 0
- return 0
-}
-
-proc reset_env { env } {
- error_check_good env_close [$env close] 0
-}
-
-proc maxlocks { myenv locker_id obj_id num } {
- return [countlocks $myenv $locker_id $obj_id $num ]
-}
-
-proc maxwrites { myenv locker_id obj_id num } {
- return [countlocks $myenv $locker_id $obj_id $num ]
-}
-
-proc minlocks { myenv locker_id obj_id num } {
- return [countlocks $myenv $locker_id $obj_id $num ]
-}
-
-proc minwrites { myenv locker_id obj_id num } {
- return [countlocks $myenv $locker_id $obj_id $num ]
-}
-
-proc countlocks { myenv locker_id obj_id num } {
- set locklist ""
- for { set i 0} {$i < [expr $obj_id * 4]} { incr i } {
- set r [catch {$myenv lock_get read $locker_id \
- [expr $obj_id * 1000 + $i]} l ]
- if { $r != 0 } {
- puts $l
- return ERROR
- } else {
- error_check_good lockget:$obj_id [is_substr $l $myenv] 1
- lappend locklist $l
- }
- }
-
- # Now acquire one write lock, except for obj_id 1, which doesn't
- # acquire any. We'll use obj_id 1 to test minwrites.
- if { $obj_id != 1 } {
- set r [catch {$myenv lock_get write $locker_id \
- [expr $obj_id * 1000 + 10]} l ]
- if { $r != 0 } {
- puts $l
- return ERROR
- } else {
- error_check_good lockget:$obj_id [is_substr $l $myenv] 1
- lappend locklist $l
- }
- }
-
- # Get one extra write lock for obj_id 2. We'll use
- # obj_id 2 to test maxwrites.
- #
- if { $obj_id == 2 } {
- set extra [catch {$myenv lock_get write \
- $locker_id [expr $obj_id * 1000 + 11]} l ]
- if { $extra != 0 } {
- puts $l
- return ERROR
- } else {
- error_check_good lockget:$obj_id [is_substr $l $myenv] 1
- lappend locklist $l
- }
- }
-
- set ret [ring $myenv $locker_id $obj_id $num]
-
- foreach l $locklist {
- error_check_good lockput:$l [$l put] 0
- }
-
- return $ret
-}
-
-# This routine will let us obtain a ring of deadlocks.
-# Each locker will get a lock on obj_id, then sleep, and
-# then try to lock (obj_id + 1) % num.
-# When the lock is finally granted, we release our locks and
-# return 1 if we got both locks and DEADLOCK if we deadlocked.
-# The results here should be that 1 locker deadlocks and the
-# rest all finish successfully.
-proc ring { myenv locker_id obj_id num } {
- source ./include.tcl
-
- if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
- puts $lock1
- return ERROR
- } else {
- error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
- }
-
- tclsleep 30
- set nextobj [expr ($obj_id + 1) % $num]
- set ret 1
- if {[catch {$myenv lock_get write $locker_id $nextobj} lock2] != 0} {
- if {[string match "*DEADLOCK*" $lock2] == 1} {
- set ret DEADLOCK
- } else {
- if {[string match "*NOTGRANTED*" $lock2] == 1} {
- set ret DEADLOCK
- } else {
- puts $lock2
- set ret ERROR
- }
- }
- } else {
- error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
- }
-
- # Now release the first lock
- error_check_good lockput:$lock1 [$lock1 put] 0
-
- if {$ret == 1} {
- error_check_bad lockget:$obj_id $lock2 NULL
- error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
- error_check_good lockput:$lock2 [$lock2 put] 0
- }
- return $ret
-}
-
-# This routine will create massive deadlocks.
-# Each locker will get a readlock on obj_id, then sleep, and
-# then try to upgrade the readlock to a write lock.
-# When the lock is finally granted, we release our first lock and
-# return 1 if we got both locks and DEADLOCK if we deadlocked.
-# The results here should be that 1 locker succeeds in getting all
-# the locks and everyone else deadlocks.
-proc clump { myenv locker_id obj_id num } {
- source ./include.tcl
-
- set obj_id 10
- if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
- puts $lock1
- return ERROR
- } else {
- error_check_good lockget:$obj_id \
- [is_valid_lock $lock1 $myenv] TRUE
- }
-
- tclsleep 30
- set ret 1
- if {[catch {$myenv lock_get write $locker_id $obj_id} lock2] != 0} {
- if {[string match "*DEADLOCK*" $lock2] == 1} {
- set ret DEADLOCK
- } else {
- if {[string match "*NOTGRANTED*" $lock2] == 1} {
- set ret DEADLOCK
- } else {
- puts $lock2
- set ret ERROR
- }
- }
- } else {
- error_check_good \
- lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
- }
-
- # Now release the first lock
- error_check_good lockput:$lock1 [$lock1 put] 0
-
- if {$ret == 1} {
- error_check_good \
- lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
- error_check_good lockput:$lock2 [$lock2 put] 0
- }
- return $ret
-}
-
-proc dead_check { t procs timeout dead clean other } {
- error_check_good $t:$procs:other $other 0
- switch $t {
- ring {
- # With timeouts the number of deadlocks is
- # unpredictable: test for at least one deadlock.
- if { $timeout != 0 && $dead > 1 } {
- set clean [ expr $clean + $dead - 1]
- set dead 1
- }
- error_check_good $t:$procs:deadlocks $dead 1
- error_check_good $t:$procs:success $clean \
- [expr $procs - 1]
- }
- clump {
- # With timeouts the number of deadlocks is
- # unpredictable: test for no more than one
- # successful lock.
- if { $timeout != 0 && $dead == $procs } {
- set clean 1
- set dead [expr $procs - 1]
- }
- error_check_good $t:$procs:deadlocks $dead \
- [expr $procs - 1]
- error_check_good $t:$procs:success $clean 1
- }
- oldyoung -
- maxlocks -
- maxwrites -
- minlocks -
- minwrites {
- error_check_good $t:$procs:deadlocks $dead 1
- error_check_good $t:$procs:success $clean \
- [expr $procs - 1]
- }
- default {
- error "Test $t not implemented"
- }
- }
-}
-
-proc rdebug { id op where } {
- global recd_debug
- global recd_id
- global recd_op
-
- set recd_debug $where
- set recd_id $id
- set recd_op $op
-}
-
-proc rtag { msg id } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { $id == $tag } {
- return 1
- } else {
- return 0
- }
-}
-
-proc zero_list { n } {
- set ret ""
- while { $n > 0 } {
- lappend ret 0
- incr n -1
- }
- return $ret
-}
-
-proc check_dump { k d } {
- puts "key: $k data: $d"
-}
-
-proc reverse { s } {
- set res ""
- for { set i 0 } { $i < [string length $s] } { incr i } {
- set res "[string index $s $i]$res"
- }
-
- return $res
-}
-
-#
-# This is an internal-only proc. All tests should use 'is_valid_db' etc.
-#
-proc is_valid_widget { w expected } {
- # First N characters must match "expected"
- set l [string length $expected]
- incr l -1
- if { [string compare [string range $w 0 $l] $expected] != 0 } {
- return $w
- }
-
- # Remaining characters must be digits
- incr l 1
- for { set i $l } { $i < [string length $w] } { incr i} {
- set c [string index $w $i]
- if { $c < "0" || $c > "9" } {
- return $w
- }
- }
-
- return TRUE
-}
-
-proc is_valid_db { db } {
- return [is_valid_widget $db db]
-}
-
-proc is_valid_env { env } {
- return [is_valid_widget $env env]
-}
-
-proc is_valid_cursor { dbc db } {
- return [is_valid_widget $dbc $db.c]
-}
-
-proc is_valid_lock { lock env } {
- return [is_valid_widget $lock $env.lock]
-}
-
-proc is_valid_logc { logc env } {
- return [is_valid_widget $logc $env.logc]
-}
-
-proc is_valid_mpool { mpool env } {
- return [is_valid_widget $mpool $env.mp]
-}
-
-proc is_valid_page { page mpool } {
- return [is_valid_widget $page $mpool.pg]
-}
-
-proc is_valid_txn { txn env } {
- return [is_valid_widget $txn $env.txn]
-}
-
-proc is_valid_mutex { m env } {
- return [is_valid_widget $m $env.mutex]
-}
-
-proc is_valid_locker {l } {
- return [is_valid_widget $l ""]
-}
-
-proc is_valid_seq { seq } {
- return [is_valid_widget $seq seq]
-}
-
-proc send_cmd { fd cmd {sleep 2}} {
- source ./include.tcl
-
- puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \
- puts \"FAIL: \$ret\" \
- }"
- puts $fd "flush stdout"
- flush $fd
- berkdb debug_check
- tclsleep $sleep
-
- set r [rcv_result $fd]
- return $r
-}
-
-proc rcv_result { fd } {
- set r [gets $fd result]
- error_check_bad remote_read $r -1
-
- return $result
-}
-
-proc send_timed_cmd { fd rcv_too cmd } {
- set c1 "set start \[timestamp -r\]; "
- set c2 "puts \[expr \[timestamp -r\] - \$start\]"
- set full_cmd [concat $c1 $cmd ";" $c2]
-
- puts $fd $full_cmd
- puts $fd "flush stdout"
- flush $fd
- return 0
-}
-
-#
-# The rationale for having *two* "data padding" routines is outlined
-# below:
-#
-# Both pad_data and chop_data truncate data that is too long. However,
-# pad_data also adds the pad character to pad data out to the fixed length
-# record length.
-#
-# Which routine you call does not depend on the length of the data you're
-# using, but on whether you're doing a put or a get. When we do a put, we
-# have to make sure the data isn't longer than the size of a record because
-# otherwise we'll get an error (use chop_data). When we do a get, we want to
-# check that db padded everything correctly (use pad_data on the value against
-# which we are comparing).
-#
-# We don't want to just use the pad_data routine for both purposes, because
-# we want to be able to test whether or not db is padding correctly. For
-# example, the queue access method had a bug where when a record was
-# overwritten (*not* a partial put), only the first n bytes of the new entry
-# were written, n being the new entry's (unpadded) length. So, if we did
-# a put with key,value pair (1, "abcdef") and then a put (1, "z"), we'd get
-# back (1,"zbcdef"). If we had used pad_data instead of chop_data, we would
-# have gotten the "correct" result, but we wouldn't have found this bug.
-proc chop_data {method data} {
- global fixed_len
-
- if {[is_fixed_length $method] == 1 && \
- [string length $data] > $fixed_len} {
- return [eval {binary format a$fixed_len $data}]
- } else {
- return $data
- }
-}
-
-proc pad_data {method data} {
- global fixed_len
-
- if {[is_fixed_length $method] == 1} {
- return [eval {binary format a$fixed_len $data}]
- } else {
- return $data
- }
-}
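-
-# A minimal sketch of the put/get asymmetry described above
-# (hypothetical helper): chop the data before a put, and pad the
-# expected value before comparing it against a get.
-proc pad_chop_example { db method key data } {
- error_check_good put:$key \
-     [$db put $key [chop_data $method $data]] 0
- set ret [lindex [lindex [$db get $key] 0] 1]
- return [binary_compare $ret [pad_data $method $data]] ;# 0 on match
-}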
-
-proc make_fixed_length {method data {pad 0}} {
- global fixed_len
-
- if {[is_fixed_length $method] == 1} {
- if {[string length $data] > $fixed_len } {
- error_check_bad make_fixed_len:TOO_LONG 1 1
- }
- while { [string length $data] < $fixed_len } {
- set data [format $data%c $pad]
- }
- }
- return $data
-}
-
-proc make_gid {data} {
- while { [string length $data] < 128 } {
- set data [format ${data}0]
- }
- return $data
-}
-
-# shift data for partial
-# pad with fixed pad (which is NULL)
-proc partial_shift { data offset direction} {
- global fixed_len
-
- set len [expr $fixed_len - 1]
-
- if { [string compare $direction "right"] == 0 } {
- for { set i 1} { $i <= $offset } {incr i} {
- set data [binary format x1a$len $data]
- }
- } elseif { [string compare $direction "left"] == 0 } {
- for { set i 1} { $i <= $offset } {incr i} {
- set data [string range $data 1 end]
- set data [binary format a$len $data]
- }
- }
- return $data
-}
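-
-# A minimal sketch (hypothetical helper): shifting right by one mirrors
-# a partial put at offset 1 into a fixed-length record.
-proc partial_shift_example {} {
- # Assumes the suite's fixed_len global is set; with fixed_len = 4
- # the result is one NUL pad byte followed by "abc".
- return [partial_shift abc 1 right]
-}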
-
-# string compare does not always work to compare
-# this data, nor does expr (==)
-# specialized routine for comparison
-# (for use in fixed len recno and q)
-proc binary_compare { data1 data2 } {
- if { [string length $data1] != [string length $data2] || \
- [string compare -length \
- [string length $data1] $data1 $data2] != 0 } {
- return 1
- } else {
- return 0
- }
-}
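-
-# A minimal sketch (hypothetical helper): binary_compare returns 0 only
-# on an exact byte-for-byte match, which is what fixed-length records
-# containing NUL pad bytes need.
-proc binary_compare_example {} {
- set a [binary format a4 abc] ;# "abc" plus one NUL pad byte
- set b [binary format a4 abc]
- return [binary_compare $a $b] ;# -> 0 (identical)
-}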
-
-# This is a comparison function used with the lsort command.
-# It treats its inputs as 32 bit signed integers for comparison,
-# and is coded to work with both 32 bit and 64 bit versions of tclsh.
-proc int32_compare { val1 val2 } {
- # Big is set to 2^32 on a 64 bit machine, or 0 on 32 bit machine.
- set big [expr 0xffffffff + 1]
- if { $val1 >= 0x80000000 } {
- set val1 [expr $val1 - $big]
- }
- if { $val2 >= 0x80000000 } {
- set val2 [expr $val2 - $big]
- }
- return [expr $val1 - $val2]
-}
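-
-# A minimal sketch (hypothetical helper): used as an lsort comparison
-# command, values at or above 0x80000000 sort as negative 32-bit
-# integers on both 32-bit and 64-bit tclsh builds.
-proc int32_sort_example {} {
- # 2147483648 (0x80000000) sorts first, as -2147483648.
- return [lsort -command int32_compare {0 2147483647 2147483648 1}]
-}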
-
-proc convert_method { method } {
- switch -- $method {
- -btree -
- -dbtree -
- dbtree -
- -ddbtree -
- ddbtree -
- -rbtree -
- BTREE -
- DB_BTREE -
- DB_RBTREE -
- RBTREE -
- bt -
- btree -
- db_btree -
- db_rbtree -
- rbt -
- rbtree { return "-btree" }
-
- -dhash -
- -ddhash -
- -hash -
- DB_HASH -
- HASH -
- dhash -
- ddhash -
- db_hash -
- h -
- hash { return "-hash" }
-
- -queue -
- DB_QUEUE -
- QUEUE -
- db_queue -
- q -
- qam -
- queue -
- -iqueue -
- DB_IQUEUE -
- IQUEUE -
- db_iqueue -
- iq -
- iqam -
- iqueue { return "-queue" }
-
- -queueextent -
- QUEUEEXTENT -
- qe -
- qamext -
- -queueext -
- queueextent -
- queueext -
- -iqueueextent -
- IQUEUEEXTENT -
- iqe -
- iqamext -
- -iqueueext -
- iqueueextent -
- iqueueext { return "-queue" }
-
- -frecno -
- -recno -
- -rrecno -
- DB_FRECNO -
- DB_RECNO -
- DB_RRECNO -
- FRECNO -
- RECNO -
- RRECNO -
- db_frecno -
- db_recno -
- db_rrecno -
- frec -
- frecno -
- rec -
- recno -
- rrec -
- rrecno { return "-recno" }
-
- default { error "FAIL:[timestamp] $method: unknown method" }
- }
-}
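-
-# A minimal sketch (hypothetical helper): all of the aliases above
-# collapse onto the four flags the rest of the suite switches on.
-proc convert_method_example {} {
- # "rbt" -> "-btree", "iqe" -> "-queue", "frec" -> "-recno"
- return [list [convert_method rbt] \
-     [convert_method iqe] [convert_method frec]]
-}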
-
-proc split_encargs { largs encargsp } {
- global encrypt
- upvar $encargsp e
- set eindex [lsearch $largs "-encrypta*"]
- if { $eindex == -1 } {
- set e ""
- set newl $largs
- } else {
- set eend [expr $eindex + 1]
- set e [lrange $largs $eindex $eend]
- set newl [lreplace $largs $eindex $eend "-encrypt"]
- }
- return $newl
-}
-
-proc convert_encrypt { largs } {
- global encrypt
- global old_encrypt
-
- set old_encrypt $encrypt
- set encrypt 0
- if { [lsearch $largs "-encrypt*"] != -1 } {
- set encrypt 1
- }
-}
-
-# If recno-with-renumbering or btree-with-renumbering is specified, then
-# fix the arguments to specify the DB_RENUMBER/DB_RECNUM option for the
-# -flags argument.
-proc convert_args { method {largs ""} } {
- global fixed_len
- global gen_upgrade
- global upgrade_be
- source ./include.tcl
-
- if { [string first - $largs] == -1 &&\
- [string compare $largs ""] != 0 &&\
- [string compare $largs {{}}] != 0 } {
- set errstring "args must contain a hyphen; does this test\
- have no numeric args?"
- puts "FAIL:[timestamp] $errstring (largs was $largs)"
- return -code return
- }
-
- convert_encrypt $largs
- if { $gen_upgrade == 1 && $upgrade_be == 1 } {
- append largs " -lorder 4321 "
- } elseif { $gen_upgrade == 1 && $upgrade_be != 1 } {
- append largs " -lorder 1234 "
- }
-
- if { [is_rrecno $method] == 1 } {
- append largs " -renumber "
- } elseif { [is_rbtree $method] == 1 } {
- append largs " -recnum "
- } elseif { [is_dbtree $method] == 1 } {
- append largs " -dup "
- } elseif { [is_ddbtree $method] == 1 } {
- append largs " -dup "
- append largs " -dupsort "
- } elseif { [is_dhash $method] == 1 } {
- append largs " -dup "
- } elseif { [is_ddhash $method] == 1 } {
- append largs " -dup "
- append largs " -dupsort "
- } elseif { [is_queueext $method] == 1 } {
- append largs " -extent 4 "
- }
-
- if { [is_iqueue $method] == 1 || [is_iqueueext $method] == 1 } {
- append largs " -inorder "
- }
-
- # Default padding character is ASCII nul.
- set fixed_pad 0
- if {[is_fixed_length $method] == 1} {
- append largs " -len $fixed_len -pad $fixed_pad "
- }
- return $largs
-}
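-
-# A minimal sketch (hypothetical helper; assumes the suite globals
-# gen_upgrade, upgrade_be and fixed_len are initialized, as test.tcl
-# normally does): rrecno picks up -renumber, and queue, being
-# fixed-length, picks up -len/-pad.
-proc convert_args_example {} {
- return [list [convert_args rrecno ""] [convert_args queue ""]]
-}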
-
-proc is_btree { method } {
- set names { -btree BTREE DB_BTREE bt btree }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_dbtree { method } {
- set names { -dbtree dbtree }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_ddbtree { method } {
- set names { -ddbtree ddbtree }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_rbtree { method } {
- set names { -rbtree rbtree RBTREE db_rbtree DB_RBTREE rbt }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_recno { method } {
- set names { -recno DB_RECNO RECNO db_recno rec recno}
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_rrecno { method } {
- set names { -rrecno rrecno RRECNO db_rrecno DB_RRECNO rrec }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_frecno { method } {
- set names { -frecno frecno frec FRECNO db_frecno DB_FRECNO}
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_hash { method } {
- set names { -hash DB_HASH HASH db_hash h hash }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_dhash { method } {
- set names { -dhash dhash }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_ddhash { method } {
- set names { -ddhash ddhash }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_queue { method } {
- if { [is_queueext $method] == 1 || [is_iqueue $method] == 1 || \
- [is_iqueueext $method] == 1 } {
- return 1
- }
-
- set names { -queue DB_QUEUE QUEUE db_queue q queue qam }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_queueext { method } {
- if { [is_iqueueext $method] == 1 } {
- return 1
- }
-
- set names { -queueextent queueextent QUEUEEXTENT qe qamext \
- queueext -queueext }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_iqueue { method } {
- if { [is_iqueueext $method] == 1 } {
- return 1
- }
-
- set names { -iqueue DB_IQUEUE IQUEUE db_iqueue iq iqueue iqam }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_iqueueext { method } {
- set names { -iqueueextent iqueueextent IQUEUEEXTENT iqe iqamext \
- iqueueext -iqueueext }
- if { [lsearch $names $method] >= 0 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_record_based { method } {
- if { [is_recno $method] || [is_frecno $method] ||
- [is_rrecno $method] || [is_queue $method] } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_fixed_length { method } {
- if { [is_queue $method] || [is_frecno $method] } {
- return 1
- } else {
- return 0
- }
-}
-
-# Sort lines in file $in and write results to file $out.
-# This is a more portable alternative to execing the sort command,
-# which has assorted issues on NT [#1576].
-# The addition of a "-n" argument will sort numerically.
-proc filesort { in out { arg "" } } {
- set i [open $in r]
-
- set ilines {}
- while { [gets $i line] >= 0 } {
- lappend ilines $line
- }
-
- if { [string compare $arg "-n"] == 0 } {
- set olines [lsort -integer $ilines]
- } else {
- set olines [lsort $ilines]
- }
-
- close $i
-
- set o [open $out w]
- foreach line $olines {
- puts $o $line
- }
-
- close $o
-}
-
-# Print lines up to the nth line of infile out to outfile, inclusive.
-# The optional beg argument tells us where to start.
-proc filehead { n infile outfile { beg 0 } } {
- set in [open $infile r]
- set out [open $outfile w]
-
- # Sed uses 1-based line numbers, and so we do too.
- for { set i 1 } { $i < $beg } { incr i } {
- if { [gets $in junk] < 0 } {
- break
- }
- }
-
- for { } { $i <= $n } { incr i } {
- if { [gets $in line] < 0 } {
- break
- }
- puts $out $line
- }
-
- close $in
- close $out
-}
-
-# Remove file (this replaces $RM).
-# Usage: fileremove filenames =~ rm; fileremove -f filenames =~ rm -rf.
-proc fileremove { args } {
- set forceflag ""
- foreach a $args {
- if { [string first - $a] == 0 } {
- # It's a flag. Better be f.
- if { [string first f $a] != 1 } {
- return -code error "bad flag to fileremove"
- } else {
- set forceflag "-force"
- }
- } else {
- eval {file delete $forceflag $a}
- }
- }
-}
-
-proc findfail { args } {
- set errstring {}
- foreach a $args {
- if { [file exists $a] == 0 } {
- continue
- }
- set f [open $a r]
- while { [gets $f line] >= 0 } {
- if { [string first FAIL $line] == 0 } {
- lappend errstring $a:$line
- }
- }
- close $f
- }
- return $errstring
-}
-
-# Sleep for s seconds.
-proc tclsleep { s } {
- # On Windows, the system time-of-day clock may update as much
- # as 55 ms late due to interrupt timing. Don't take any
- # chances; sleep extra-long so that when tclsleep 1 returns,
- # it's guaranteed to be a new second.
- after [expr $s * 1000 + 56]
-}
-
-# Kill a process.
-proc tclkill { id } {
- source ./include.tcl
-
- while { [ catch {exec $KILL -0 $id} ] == 0 } {
- catch {exec $KILL -9 $id}
- tclsleep 5
- }
-}
-
-# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical.
-proc filecmp { file_a file_b } {
- set fda [open $file_a r]
- set fdb [open $file_b r]
-
- set nra 0
- set nrb 0
-
- # The gets can't be in the while condition because we'll
- # get short-circuit evaluated.
- while { $nra >= 0 && $nrb >= 0 } {
- set nra [gets $fda aline]
- set nrb [gets $fdb bline]
-
- if { $nra != $nrb || [string compare $aline $bline] != 0} {
- close $fda
- close $fdb
- return 1
- }
- }
-
- close $fda
- close $fdb
- return 0
-}
-
-# Given two SORTED files, one of which is a complete superset of the other,
-# extract out the unique portions of the superset and put them in
-# the given outfile.
-proc fileextract { superset subset outfile } {
- set sup [open $superset r]
- set sub [open $subset r]
- set outf [open $outfile w]
-
- # The gets can't be in the while condition because we'll
- # get short-circuit evaluated.
- set nrp [gets $sup pline]
- set nrb [gets $sub bline]
- while { $nrp >= 0 } {
- if { $nrp != $nrb || [string compare $pline $bline] != 0} {
- puts $outf $pline
- } else {
- set nrb [gets $sub bline]
- }
- set nrp [gets $sup pline]
- }
-
- close $sup
- close $sub
- close $outf
- return 0
-}
-
-# Verify all .db files in the specified directory.
-proc verify_dir { {directory $testdir} { pref "" } \
- { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } { unref 1 } } {
- global encrypt
- global passwd
-
- # If we're doing database verification between tests, we don't
- # want to verify twice without an intervening cleanup; if the
- # NOREVERIFY marker file is still present, the intervening test
- # was skipped. Always verify by default (noredo == 0) so that
- # explicit calls to verify_dir during tests don't require cleanup
- # commands.
- if { $noredo == 1 } {
- if { [file exists $directory/NOREVERIFY] == 1 } {
- if { $quiet == 0 } {
- puts "Skipping verification."
- }
- return 0
- }
- set f [open $directory/NOREVERIFY w]
- close $f
- }
-
- if { [catch {glob $directory/*.db} dbs] != 0 } {
- # No files matched
- return 0
- }
- set errfilearg "-errfile /dev/stderr "
- set errpfxarg {-errpfx "FAIL: verify" }
- set errarg $errfilearg$errpfxarg
- set ret 0
-
- # Open an env, so that we have a large enough cache. Pick
- # a fairly generous default if we haven't specified something else.
-
- if { $cachesize == 0 } {
- set cachesize [expr 1024 * 1024]
- }
- set encarg ""
- if { $encrypt != 0 } {
- set encarg "-encryptaes $passwd"
- }
-
- set env [eval {berkdb_env -create -private} $encarg \
- {-cachesize [list 0 $cachesize 0]}]
- set earg " -env $env $errarg "
-
- # The 'unref' flag means that we report unreferenced pages
- # at all times. This is the default behavior.
- # If we have a test which leaves unreferenced pages on systems
- # where HAVE_FTRUNCATE is not on, then we call verify_dir with
- # unref == 0.
- set uflag "-unref"
- if { $unref == 0 } {
- set uflag ""
- }
-
- foreach db $dbs {
- if { [catch \
- {eval {berkdb dbverify} $uflag $earg $db} res] != 0 } {
- puts $res
- puts "FAIL:[timestamp] Verification of $db failed."
- set ret 1
- continue
- } else {
- error_check_good verify:$db $res 0
- if { $quiet == 0 } {
- puts "${pref}Verification of $db succeeded."
- }
- }
-
- # Skip the dump if it's dangerous to do it.
- if { $nodump == 0 } {
- if { [catch {eval dumploadtest $db} res] != 0 } {
- puts $res
- puts "FAIL:[timestamp] Dump/load of $db failed."
- set ret 1
- continue
- } else {
- error_check_good dumpload:$db $res 0
- if { $quiet == 0 } {
- puts \
- "${pref}Dump/load of $db succeeded."
- }
- }
- }
- }
-
- error_check_good vrfyenv_close [$env close] 0
-
- return $ret
-}
-
-# Is the database handle in $db a master database containing subdbs?
-proc check_for_subdbs { db } {
- set stat [$db stat]
- for { set i 0 } { [string length [lindex $stat $i]] > 0 } { incr i } {
- set elem [lindex $stat $i]
- if { [string compare [lindex $elem 0] Flags] == 0 } {
- # This is the list of flags; look for
- # "subdatabases".
- if { [is_substr [lindex $elem 1] subdatabases] } {
- return 1
- }
- }
- }
- return 0
-}
-
-proc db_compare { olddb newdb olddbname newdbname } {
- # Walk through olddb and newdb and make sure their contents
- # are identical.
- set oc [$olddb cursor]
- set nc [$newdb cursor]
- error_check_good orig_cursor($olddbname) \
- [is_valid_cursor $oc $olddb] TRUE
- error_check_good new_cursor($olddbname) \
- [is_valid_cursor $nc $newdb] TRUE
-
- for { set odbt [$oc get -first] } { [llength $odbt] > 0 } \
- { set odbt [$oc get -next] } {
- set ndbt [$nc get -get_both \
- [lindex [lindex $odbt 0] 0] [lindex [lindex $odbt 0] 1]]
- error_check_good db_compare($olddbname/$newdbname) $ndbt $odbt
- }
-
- for { set ndbt [$nc get -first] } { [llength $ndbt] > 0 } \
- { set ndbt [$nc get -next] } {
- set odbt [$oc get -get_both \
- [lindex [lindex $ndbt 0] 0] [lindex [lindex $ndbt 0] 1]]
- error_check_good db_compare_back($olddbname) $odbt $ndbt
- }
-
- error_check_good orig_cursor_close($olddbname) [$oc close] 0
- error_check_good new_cursor_close($newdbname) [$nc close] 0
-
- return 0
-}
-
-proc dumploadtest { db } {
- global util_path
- global encrypt
- global passwd
-
- set newdbname $db-dumpload.db
-
- set dbarg ""
- set utilflag ""
- if { $encrypt != 0 } {
- set dbarg "-encryptany $passwd"
- set utilflag "-P $passwd"
- }
-
- # Dump/load the whole file, including all subdbs.
- set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
- $db | $util_path/db_load $utilflag $newdbname} res]
- error_check_good db_dump/db_load($db:$res) $rval 0
-
- # If the old file was empty, there's no new file and we're done.
- if { [file exists $newdbname] == 0 } {
- return 0
- }
-
- # Open original database.
- set olddb [eval {berkdb_open -rdonly} $dbarg $db]
- error_check_good olddb($db) [is_valid_db $olddb] TRUE
-
- if { [check_for_subdbs $olddb] } {
- # If $db has subdatabases, compare each one separately.
- set oc [$olddb cursor]
- error_check_good orig_cursor($db) \
- [is_valid_cursor $oc $olddb] TRUE
-
- for { set dbt [$oc get -first] } \
- { [llength $dbt] > 0 } \
- { set dbt [$oc get -next] } {
- set subdb [lindex [lindex $dbt 0] 0]
-
- set oldsubdb \
- [eval {berkdb_open -rdonly} $dbarg {$db $subdb}]
- error_check_good olddb($db) [is_valid_db $oldsubdb] TRUE
-
- # Open the new database.
- set newdb \
- [eval {berkdb_open -rdonly} $dbarg {$newdbname $subdb}]
- error_check_good newdb($db) [is_valid_db $newdb] TRUE
-
- db_compare $oldsubdb $newdb $db $newdbname
- error_check_good new_db_close($db) [$newdb close] 0
- error_check_good old_subdb_close($oldsubdb) [$oldsubdb close] 0
- }
-
- error_check_good oldcclose [$oc close] 0
- } else {
- # Open the new database.
- set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname]
- error_check_good newdb($db) [is_valid_db $newdb] TRUE
-
- db_compare $olddb $newdb $db $newdbname
- error_check_good new_db_close($db) [$newdb close] 0
- }
-
- error_check_good orig_db_close($db) [$olddb close] 0
- eval berkdb dbremove $dbarg $newdbname
-}
-
-# Generate randomly ordered, guaranteed-unique four-character strings that can
-# be used to differentiate duplicates without creating duplicate duplicates.
-# (test031 & test032) randstring_init is required before the first call to
-# randstring and initializes things for up to $i distinct strings; randstring
-# gets the next string.
-proc randstring_init { i } {
- global rs_int_list alphabet
-
- # Fail if we can't generate sufficient unique strings.
- if { $i > [expr 26 * 26 * 26 * 26] } {
- set errstring\
- "Duplicate set too large for random string generator"
- puts "FAIL:[timestamp] $errstring"
- return -code return $errstring
- }
-
- set rs_int_list {}
-
- # generate alphabet array
- for { set j 0 } { $j < 26 } { incr j } {
- set a($j) [string index $alphabet $j]
- }
-
- # Generate a list with $i elements, { aaaa, aaab, ... aaaz, aaba ...}
- for { set d1 0 ; set j 0 } { $d1 < 26 && $j < $i } { incr d1 } {
- for { set d2 0 } { $d2 < 26 && $j < $i } { incr d2 } {
- for { set d3 0 } { $d3 < 26 && $j < $i } { incr d3 } {
- for { set d4 0 } { $d4 < 26 && $j < $i } \
- { incr d4 } {
- lappend rs_int_list \
- $a($d1)$a($d2)$a($d3)$a($d4)
- incr j
- }
- }
- }
- }
-
- # Randomize the list.
- set rs_int_list [randomize_list $rs_int_list]
-}
-
-# Randomize a list. Returns a randomly-reordered copy of l.
-proc randomize_list { l } {
- set i [llength $l]
-
- for { set j 0 } { $j < $i } { incr j } {
- # Pick a random element from $j to the end
- set k [berkdb random_int $j [expr $i - 1]]
-
- # Swap it with element $j
- set t1 [lindex $l $j]
- set t2 [lindex $l $k]
-
- set l [lreplace $l $j $j $t2]
- set l [lreplace $l $k $k $t1]
- }
-
- return $l
-}
-
-proc randstring {} {
- global rs_int_list
-
- if { [info exists rs_int_list] == 0 || [llength $rs_int_list] == 0 } {
- set errstring "randstring uninitialized or used too often"
- puts "FAIL:[timestamp] $errstring"
- return -code return $errstring
- }
-
- set item [lindex $rs_int_list 0]
- set rs_int_list [lreplace $rs_int_list 0 0]
-
- return $item
-}
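-
-# A minimal sketch (hypothetical helper; assumes the suite's alphabet
-# global): initialize for 100 distinct strings, then draw two
-# guaranteed-unique four-character strings.
-proc randstring_example {} {
- randstring_init 100
- set a [randstring]
- set b [randstring]
- return [string compare $a $b] ;# nonzero: never equal
-}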
-
-# Takes a variable-length arg list, and returns a list containing the list of
-# the non-hyphenated-flag arguments, followed by a list of each alphanumeric
-# flag it finds.
-proc extractflags { args } {
- set inflags 1
- set flags {}
- while { $inflags == 1 } {
- set curarg [lindex $args 0]
- if { [string first "-" $curarg] == 0 } {
- set i 1
- while {[string length [set f \
- [string index $curarg $i]]] > 0 } {
- incr i
- if { [string compare $f "-"] == 0 } {
- set inflags 0
- break
- } else {
- lappend flags $f
- }
- }
- set args [lrange $args 1 end]
- } else {
- set inflags 0
- }
- }
- return [list $args $flags]
-}
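-
-# A minimal sketch (hypothetical helper): "-ab foo bar" splits into the
-# non-flag arguments and the single-letter flags.
-proc extractflags_example {} {
- # -> {{foo bar} {a b}}
- return [extractflags -ab foo bar]
-}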
-
-# Wrapper for berkdb open, used throughout the test suite so that we can
-# set an errfile/errpfx as appropriate.
-proc berkdb_open { args } {
- global is_envmethod
-
- if { [info exists is_envmethod] == 0 } {
- set is_envmethod 0
- }
-
- set errargs {}
- if { $is_envmethod == 0 } {
- append errargs " -errfile /dev/stderr "
- append errargs " -errpfx \\F\\A\\I\\L"
- }
-
- eval {berkdb open} $errargs $args
-}
-
-# Version without errpfx/errfile, used when we're expecting a failure.
-proc berkdb_open_noerr { args } {
- eval {berkdb open} $args
-}
-
-# Wrapper for berkdb env, used throughout the test suite so that we can
-# set an errfile/errpfx as appropriate.
-proc berkdb_env { args } {
- global is_envmethod
-
- if { [info exists is_envmethod] == 0 } {
- set is_envmethod 0
- }
-
- set errargs {}
- if { $is_envmethod == 0 } {
- append errargs " -errfile /dev/stderr "
- append errargs " -errpfx \\F\\A\\I\\L"
- }
-
- eval {berkdb env} $errargs $args
-}
-
-# Version without errpfx/errfile, used when we're expecting a failure.
-proc berkdb_env_noerr { args } {
- eval {berkdb env} $args
-}
-
-proc check_handles { {outf stdout} } {
- global ohandles
-
- set handles [berkdb handles]
- if {[llength $handles] != [llength $ohandles]} {
- puts $outf "WARNING: Open handles during cleanup: $handles"
- }
- set ohandles $handles
-}
-
-proc open_handles { } {
- return [llength [berkdb handles]]
-}
-
-proc move_file_extent { dir dbfile tag op } {
- set curfiles [get_extfiles $dir $dbfile ""]
- set tagfiles [get_extfiles $dir $dbfile $tag]
- #
- # We want to copy or rename only those that have been saved,
- # so delete all the current extent files so that we don't
- # end up with extra ones we didn't restore from our saved ones.
- foreach extfile $curfiles {
- file delete -force $extfile
- }
- foreach extfile $tagfiles {
- set i [string last "." $extfile]
- incr i
- set extnum [string range $extfile $i end]
- set dbq [make_ext_filename $dir $dbfile $extnum]
- #
- # We can either copy or rename
- #
- file $op -force $extfile $dbq
- }
-}
-
-proc copy_extent_file { dir dbfile tag { op copy } } {
- set files [get_extfiles $dir $dbfile ""]
- foreach extfile $files {
- set i [string last "." $extfile]
- incr i
- set extnum [string range $extfile $i end]
- file $op -force $extfile $dir/__dbq.$dbfile.$tag.$extnum
- }
-}
-
-proc get_extfiles { dir dbfile tag } {
- if { $tag == "" } {
- set filepat $dir/__dbq.$dbfile.\[0-9\]*
- } else {
- set filepat $dir/__dbq.$dbfile.$tag.\[0-9\]*
- }
- return [glob -nocomplain -- $filepat]
-}
-
-proc make_ext_filename { dir dbfile extnum } {
- return $dir/__dbq.$dbfile.$extnum
-}
-
-# All pids for Windows 9X are negative values. When we want to have
-# unsigned int values, unique to the process, we'll take the absolute
-# value of the pid. This avoids unsigned/signed mistakes, yet
-# guarantees uniqueness, since each system has pids that are all
-# either positive or negative.
-#
-proc sanitized_pid { } {
- set mypid [pid]
- if { $mypid < 0 } {
- set mypid [expr - $mypid]
- }
- puts "PID: [pid] $mypid\n"
- return $mypid
-}
-
-#
-# Extract the page size field from a stat record. Return -1 if
-# none is found.
-#
-proc get_pagesize { stat } {
- foreach field $stat {
- set title [lindex $field 0]
- if {[string compare $title "Page size"] == 0} {
- return [lindex $field 1]
- }
- }
- return -1
-}
-
-# Get a globbed list of source files and executables to use as large
-# data items in overflow page tests.
-proc get_file_list { {small 0} } {
- global is_windows_test
- global is_qnx_test
- global is_je_test
- global src_root
-
- # Skip libraries if we have a debug build.
- if { $is_qnx_test || $is_je_test || [is_debug] == 1 } {
- set small 1
- }
-
- if { $small && $is_windows_test } {
- set templist [glob $src_root/*/*.c */env*.obj]
- } elseif { $small } {
- set templist [glob $src_root/*/*.c ./env*.o]
- } elseif { $is_windows_test } {
- set templist \
- [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll]
- } else {
- set templist [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?]
- }
-
- # We don't want a huge number of files, but we do want a nice
- # variety. If there are more than 200 files, pick out a list
- # by taking every other, or every third, or every nth file.
- set filelist {}
- set nfiles 200
- if { [llength $templist] > $nfiles } {
- set skip \
- [expr [llength $templist] / [expr [expr $nfiles / 3] * 2]]
- set i $skip
- while { $i < [llength $templist] } {
- lappend filelist [lindex $templist $i]
- incr i $skip
- }
- } else {
- set filelist $templist
- }
- return $filelist
-}
-
-proc is_cdbenv { env } {
- set sys [$env attributes]
- if { [lsearch $sys -cdb] != -1 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_lockenv { env } {
- set sys [$env attributes]
- if { [lsearch $sys -lock] != -1 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_logenv { env } {
- set sys [$env attributes]
- if { [lsearch $sys -log] != -1 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_mpoolenv { env } {
- set sys [$env attributes]
- if { [lsearch $sys -mpool] != -1 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_repenv { env } {
- set sys [$env attributes]
- if { [lsearch $sys -rep] != -1 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_rpcenv { env } {
- set sys [$env attributes]
- if { [lsearch $sys -rpc] != -1 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_secenv { env } {
- set sys [$env attributes]
- if { [lsearch $sys -crypto] != -1 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc is_txnenv { env } {
- set sys [$env attributes]
- if { [lsearch $sys -txn] != -1 } {
- return 1
- } else {
- return 0
- }
-}
-
-proc get_home { env } {
- set sys [$env attributes]
- set h [lsearch $sys -home]
- if { $h == -1 } {
- return NULL
- }
- incr h
- return [lindex $sys $h]
-}
-
-proc reduce_dups { nent ndp } {
- upvar $nent nentries
- upvar $ndp ndups
-
- # If we are using a txnenv, assume it is using
- # the default maximum number of locks, cut back
- # so that we don't run out of locks. Reduce
- # by 25% until we fit.
- #
- while { [expr $nentries * $ndups] > 5000 } {
- set nentries [expr ($nentries / 4) * 3]
- set ndups [expr ($ndups / 4) * 3]
- }
-}
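-
-# A minimal sketch (hypothetical helper): reduce_dups takes variable
-# names, not values, and shrinks both by 25% per pass until the
-# product fits under the lock budget.
-proc reduce_dups_example {} {
- set nentries 1000
- set ndups 100
- reduce_dups nentries ndups
- return [list $nentries $ndups] ;# product is now <= 5000
-}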
-
-proc getstats { statlist field } {
- foreach pair $statlist {
- set txt [lindex $pair 0]
- if { [string equal $txt $field] == 1 } {
- return [lindex $pair 1]
- }
- }
- return -1
-}
-
-# Return the value for a particular field in a set of statistics.
-# Works for regular db stat as well as env stats (log_stat,
-# lock_stat, txn_stat, rep_stat, etc.).
-proc stat_field { handle which_stat field } {
- set stat [$handle $which_stat]
- return [getstats $stat $field ]
-}
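-
-# A minimal sketch (hypothetical helper): pull one named value out of
-# any stat output, e.g. the page size of an open database handle.
-proc pagesize_of { db } {
- return [stat_field $db stat "Page size"]
-}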
-
-proc big_endian { } {
- global tcl_platform
- set e $tcl_platform(byteOrder)
- if { [string compare $e littleEndian] == 0 } {
- return 0
- } elseif { [string compare $e bigEndian] == 0 } {
- return 1
- } else {
- error "FAIL: Unknown endianness $e"
- }
-}
-
-# Search logs to find if we have debug records.
-proc log_has_debug_records { dir } {
- source ./include.tcl
- global encrypt
-
- set tmpfile $dir/printlog.out
- set stat [catch \
- {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
- error_check_good db_printlog $stat 0
-
- set f [open $tmpfile r]
- while { [gets $f record] >= 0 } {
- set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
- if { $r == 1 && [string match *_debug $name] == 1 } {
- close $f
- fileremove $tmpfile
- return 1
- }
- }
- close $f
- fileremove $tmpfile
- return 0
-}
-
-# Set up a temporary database to check if this is a debug build.
-proc is_debug { } {
- source ./include.tcl
-
- set tempdir $testdir/temp
- file mkdir $tempdir
- set env [berkdb_env -create -log -home $testdir/temp]
- error_check_good temp_env_open [is_valid_env $env] TRUE
-
- set file temp.db
- set db [berkdb_open -create -env $env -btree $file]
- error_check_good temp_db_open [is_valid_db $db] TRUE
-
- set key KEY
- set data DATA
- error_check_good temp_db_put [$db put $key $data] 0
- set ret [$db get $key]
- error_check_good get_key [lindex [lindex $ret 0] 0] $key
- error_check_good get_data [lindex [lindex $ret 0] 1] $data
- error_check_good temp_db_close [$db close] 0
- error_check_good temp_db_remove [$env dbremove $file] 0
- error_check_good temp_env_close [$env close] 0
-
- if { [log_has_debug_records $tempdir] == 1 } {
- return 1
- }
- return 0
-}
-
-proc adjust_logargs { logtype } {
- if { $logtype == "in-memory" } {
- set lbuf [expr 8 * [expr 1024 * 1024]]
- set logargs " -log_inmemory -log_buffer $lbuf "
- } elseif { $logtype == "on-disk" } {
- set logargs ""
- } else {
- puts "FAIL: unrecognized log type $logtype"
- }
- return $logargs
-}
-
-proc adjust_txnargs { logtype } {
- if { $logtype == "in-memory" } {
- set txnargs " -txn "
- } elseif { $logtype == "on-disk" } {
- set txnargs " -txn nosync "
- } else {
- puts "FAIL: unrecognized log type $logtype"
- }
- return $txnargs
-}
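-
-# A minimal sketch (hypothetical helper): combine the two adjusters to
-# build environment-open arguments for a given log type.
-proc logtype_env_args { logtype } {
- return "[adjust_txnargs $logtype] [adjust_logargs $logtype]"
-}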
-
diff --git a/storage/bdb/test/txn001.tcl b/storage/bdb/test/txn001.tcl
deleted file mode 100644
index 583b7f21240..00000000000
--- a/storage/bdb/test/txn001.tcl
+++ /dev/null
@@ -1,116 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn001.tcl,v 11.38 2004/01/28 03:36:32 bostic Exp $
-#
-
-# TEST txn001
-# TEST Begin, commit, abort testing.
-proc txn001 { {tnum "001"} { max 1024 } { ntxns 50 } } {
- source ./include.tcl
- global txn_curid
- global txn_maxid
-
- puts -nonewline "Txn$tnum: Basic begin, commit, abort"
-
- if { $tnum != "001"} {
- puts " (with ID wrap)"
- } else {
- puts ""
- }
-
- # Open environment
- env_cleanup $testdir
-
- set env [eval {berkdb_env -create -mode 0644 -txn \
- -txn_max $max -home $testdir}]
- error_check_good env_open [is_valid_env $env] TRUE
- error_check_good txn_id_set \
- [ $env txn_id_set $txn_curid $txn_maxid ] 0
- txn001_suba $ntxns $env $tnum
- txn001_subb $ntxns $env $tnum
- txn001_subc $ntxns $env $tnum
- # Close and unlink the file
- error_check_good env_close:$env [$env close] 0
-}
-
-proc txn001_suba { ntxns env tnum } {
- source ./include.tcl
-
- # We will create a bunch of transactions and commit them.
- set txn_list {}
- set tid_list {}
- puts "\tTxn$tnum.a: Beginning/Committing $ntxns Transactions in $env"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now commit them all
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-}
-
-proc txn001_subb { ntxns env tnum } {
- # We will create a bunch of transactions and abort them.
- set txn_list {}
- set tid_list {}
- puts "\tTxn$tnum.b: Beginning/Aborting Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now abort them all
- foreach t $txn_list {
- error_check_good txn_abort:$t [$t abort] 0
- }
-}
-
-proc txn001_subc { ntxns env tnum } {
- # We will create a bunch of transactions, prepare them, and commit them.
- set txn_list {}
- set tid_list {}
- puts "\tTxn$tnum.c: Beginning/Prepare/Committing Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now prepare them all
- foreach t $txn_list {
- error_check_good txn_prepare:$t \
- [$t prepare [make_gid global:$t]] 0
- }
-
- # Now commit them all
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-
-}
-
diff --git a/storage/bdb/test/txn002.tcl b/storage/bdb/test/txn002.tcl
deleted file mode 100644
index 1ecbf9df9df..00000000000
--- a/storage/bdb/test/txn002.tcl
+++ /dev/null
@@ -1,91 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn002.tcl,v 11.41 2004/01/28 03:36:32 bostic Exp $
-#
-
-# TEST txn002
-# TEST Verify that read-only transactions do not write log records.
-proc txn002 { {tnum "002" } { max 1024 } { ntxns 50 } } {
- source ./include.tcl
- global txn_curid
- global txn_maxid
-
- puts -nonewline "Txn$tnum: Read-only transaction test ($max) ($ntxns)"
-
- if { $tnum != "002" } {
- puts " (with ID wrap)"
- } else {
- puts ""
- }
-
- env_cleanup $testdir
- set env [berkdb \
- env -create -mode 0644 -txn -txn_max $max -home $testdir]
- error_check_good dbenv [is_valid_env $env] TRUE
- error_check_good txn_id_set \
- [$env txn_id_set $txn_curid $txn_maxid ] 0
-
- # Save the current bytes in the log.
- set off_start [txn002_logoff $env]
-
- # We will create a bunch of transactions and commit them.
- set txn_list {}
- set tid_list {}
- puts "\tTxn$tnum.a: Beginning/Committing Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-
- # Make sure we haven't written any new log records except
- # potentially some recycle records if we were wrapping txnids.
- set off_stop [txn002_logoff $env]
- if { $off_stop != $off_start } {
- txn002_recycle_only $testdir
- }
-
- error_check_good env_close [$env close] 0
-}
-
-proc txn002_logoff { env } {
- set stat [$env log_stat]
- foreach i $stat {
- foreach {txt val} $i {break}
- if { [string compare \
- $txt {Current log file offset}] == 0 } {
- return $val
- }
- }
-}
-
-# Make sure that the only log records found are txn_recycle records
-proc txn002_recycle_only { dir } {
- global util_path
-
- set tmpfile $dir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
- error_check_good db_printlog $stat 0
-
- set f [open $tmpfile r]
- while { [gets $f record] >= 0 } {
- set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
- if { $r == 1 } {
- error_check_good record_type __txn_recycle $name
- }
- }
- close $f
- fileremove $tmpfile
-}
diff --git a/storage/bdb/test/txn003.tcl b/storage/bdb/test/txn003.tcl
deleted file mode 100644
index e6a6d6d14b6..00000000000
--- a/storage/bdb/test/txn003.tcl
+++ /dev/null
@@ -1,238 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn003.tcl,v 11.43 2004/01/28 03:36:33 bostic Exp $
-#
-
-# TEST txn003
-# TEST Test abort/commit/prepare of txns with outstanding child txns.
-proc txn003 { {tnum "003"} } {
- source ./include.tcl
- global txn_curid
- global txn_maxid
-
- puts -nonewline "Txn$tnum: Outstanding child transaction test"
-
- if { $tnum != "003" } {
- puts " (with ID wrap)"
- } else {
- puts ""
- }
- env_cleanup $testdir
- set testfile txn003.db
-
- set env_cmd "berkdb_env_noerr -create -txn -home $testdir"
- set env [eval $env_cmd]
- error_check_good dbenv [is_valid_env $env] TRUE
- error_check_good txn_id_set \
- [$env txn_id_set $txn_curid $txn_maxid] 0
-
- set oflags {-auto_commit -create -btree -mode 0644 -env $env $testfile}
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
-
- #
- # Put some data so that we can check commit or abort of child
- #
- set key 1
- set origdata some_data
- set newdata this_is_new_data
- set newdata2 some_other_new_data
-
- error_check_good db_put [$db put -auto_commit $key $origdata] 0
- error_check_good dbclose [$db close] 0
-
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
-
- txn003_check $db $key "Origdata" $origdata
-
- puts "\tTxn$tnum.a: Parent abort"
- set parent [$env txn]
- error_check_good txn_begin [is_valid_txn $parent $env] TRUE
- set child [$env txn -parent $parent]
- error_check_good txn_begin [is_valid_txn $child $env] TRUE
- error_check_good db_put [$db put -txn $child $key $newdata] 0
- error_check_good parent_abort [$parent abort] 0
- txn003_check $db $key "parent_abort" $origdata
- # Check child handle is invalid
- set stat [catch {$child abort} ret]
- error_check_good child_handle $stat 1
- error_check_good child_h2 [is_substr $ret "invalid command name"] 1
-
- puts "\tTxn$tnum.b: Parent commit"
- set parent [$env txn]
- error_check_good txn_begin [is_valid_txn $parent $env] TRUE
- set child [$env txn -parent $parent]
- error_check_good txn_begin [is_valid_txn $child $env] TRUE
- error_check_good db_put [$db put -txn $child $key $newdata] 0
- error_check_good parent_commit [$parent commit] 0
- txn003_check $db $key "parent_commit" $newdata
- # Check child handle is invalid
- set stat [catch {$child abort} ret]
- error_check_good child_handle $stat 1
- error_check_good child_h2 [is_substr $ret "invalid command name"] 1
- error_check_good dbclose [$db close] 0
- error_check_good env_close [$env close] 0
-
- #
- # Since the data check assumes what has come before, the 'commit'
- # operation must be last.
- #
- set hdr "\tTxn$tnum"
- set rlist {
- {begin ".c"}
- {prepare ".d"}
- {abort ".e"}
- {commit ".f"}
- }
- set count 0
- foreach pair $rlist {
- incr count
- set op [lindex $pair 0]
- set msg [lindex $pair 1]
- set msg $hdr$msg
- txn003_body $env_cmd $testfile $testdir $key $newdata2 $msg $op
- set env [eval $env_cmd]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- berkdb debug_check
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- #
- # For prepare we'll then just
- # end up aborting after we test what we need to.
- # So set gooddata to the same as abort.
- switch $op {
- abort {
- set gooddata $newdata
- }
- begin {
- set gooddata $newdata
- }
- commit {
- set gooddata $newdata2
- }
- prepare {
- set gooddata $newdata
- }
- }
- txn003_check $db $key "parent_$op" $gooddata
- error_check_good dbclose [$db close] 0
- error_check_good env_close [$env close] 0
- }
-
- # We can't do the attempted child discard on Windows
- # because it will leave open files that can't be removed.
- # Skip the remainder of the test for Windows.
- if { $is_windows_test == 1 } {
- puts "Skipping remainder of test for Windows"
- return
- }
- puts "\tTxn$tnum.g: Attempt child prepare"
- set env [eval $env_cmd]
- error_check_good dbenv [is_valid_env $env] TRUE
- berkdb debug_check
- set db [eval {berkdb_open_noerr} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
-
- set parent [$env txn]
- error_check_good txn_begin [is_valid_txn $parent $env] TRUE
- set child [$env txn -parent $parent]
- error_check_good txn_begin [is_valid_txn $child $env] TRUE
- error_check_good db_put [$db put -txn $child $key $newdata] 0
- set gid [make_gid child_prepare:$child]
- set stat [catch {$child prepare $gid} ret]
- error_check_good child_prepare $stat 1
- error_check_good child_prep_err [is_substr $ret "txn prepare"] 1
-
- puts "\tTxn$tnum.h: Attempt child discard"
- set stat [catch {$child discard} ret]
- error_check_good child_discard $stat 1
-
- # We just panic'd the region, so the next operations will fail.
- # No matter, we still have to clean up all the handles.
-
- set stat [catch {$parent commit} ret]
- error_check_good parent_commit $stat 1
- error_check_good parent_commit:fail [is_substr $ret "DB_RUNRECOVERY"] 1
-
- set stat [catch {$db close} ret]
- error_check_good db_close $stat 1
- error_check_good db_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
-
- set stat [catch {$env close} ret]
- error_check_good env_close $stat 1
- error_check_good env_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
-}
-
-proc txn003_body { env_cmd testfile dir key newdata2 msg op } {
- source ./include.tcl
-
- berkdb debug_check
- sentinel_init
- set gidf $dir/gidfile
- fileremove -f $gidf
- set pidlist {}
- puts "$msg.0: Executing child script to prepare txns"
- berkdb debug_check
- set p [exec $tclsh_path $test_path/wrap.tcl txnscript.tcl \
- $testdir/txnout $env_cmd $testfile $gidf $key $newdata2 &]
- lappend pidlist $p
- watch_procs $pidlist 5
- set f1 [open $testdir/txnout r]
- set r [read $f1]
- puts $r
- close $f1
- fileremove -f $testdir/txnout
-
- berkdb debug_check
- puts -nonewline "$msg.1: Running recovery ... "
- flush stdout
- berkdb debug_check
- set env [eval $env_cmd "-recover"]
- error_check_good dbenv-recover [is_valid_env $env] TRUE
- puts "complete"
-
-	puts "$msg.2: Getting txns from txn_recover"
- set txnlist [$env txn_recover]
- error_check_good txnlist_len [llength $txnlist] 1
- set tpair [lindex $txnlist 0]
-
- set gfd [open $gidf r]
- set ret [gets $gfd parentgid]
- close $gfd
- set txn [lindex $tpair 0]
- set gid [lindex $tpair 1]
-	if { $op == "begin" } {
-		puts "$msg.3: $op new txn"
-	} else {
-		puts "$msg.3: $op parent"
-	}
- error_check_good gidcompare $gid $parentgid
- if { $op == "prepare" } {
- set gid [make_gid prepare_recover:$txn]
- set stat [catch {$txn $op $gid} ret]
- error_check_good prep_error $stat 1
- error_check_good prep_err \
- [is_substr $ret "transaction already prepared"] 1
- error_check_good txn:prep_abort [$txn abort] 0
- } elseif { $op == "begin" } {
- set stat [catch {$env txn} ret]
- error_check_good begin_error $stat 1
- error_check_good begin_err \
- [is_substr $ret "not yet committed transactions is incomplete"] 1
- error_check_good txn:prep_abort [$txn abort] 0
- } else {
- error_check_good txn:$op [$txn $op] 0
- }
- error_check_good envclose [$env close] 0
-}
-
-proc txn003_check { db key msg gooddata } {
- set kd [$db get $key]
- set data [lindex [lindex $kd 0] 1]
- error_check_good $msg $data $gooddata
-}
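Note on txn003: the child-handle invalidation it checks can be reproduced in
isolation. A minimal sketch, assuming a fresh $testdir and the harness procs
used above (error_check_good, is_substr):

    # Resolving a parent transaction also resolves its children; once
    # the parent aborts, the child's Tcl handle no longer exists.
    set env [berkdb_env -create -txn -home $testdir]
    set parent [$env txn]
    set child [$env txn -parent $parent]
    error_check_good parent_abort [$parent abort] 0
    # Any call on the dead child handle must now fail.
    set stat [catch {$child abort} ret]
    error_check_good child_gone $stat 1
    error_check_good child_msg [is_substr $ret "invalid command name"] 1
    error_check_good env_close [$env close] 0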
diff --git a/storage/bdb/test/txn004.tcl b/storage/bdb/test/txn004.tcl
deleted file mode 100644
index c7accddd9b5..00000000000
--- a/storage/bdb/test/txn004.tcl
+++ /dev/null
@@ -1,62 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn004.tcl,v 11.42 2004/01/28 03:36:33 bostic Exp $
-#
-
-# TEST txn004
-# TEST Test of wraparound txnids (txn001)
-proc txn004 { } {
- source ./include.tcl
- global txn_curid
- global txn_maxid
-
- set orig_curid $txn_curid
- set orig_maxid $txn_maxid
- puts "\tTxn004.1: wraparound txnids"
- set txn_curid [expr $txn_maxid - 2]
- txn001 "004.1"
- puts "\tTxn004.2: closer wraparound txnids"
- set txn_curid [expr $txn_maxid - 3]
- set txn_maxid [expr $txn_maxid - 2]
- txn001 "004.2"
-
- puts "\tTxn004.3: test wraparound txnids"
- txn_idwrap_check $testdir
- set txn_curid $orig_curid
- set txn_maxid $orig_maxid
- return
-}
-
-proc txn_idwrap_check { testdir } {
- global txn_curid
- global txn_maxid
-
- env_cleanup $testdir
-
- # Open/create the txn region
- set e [berkdb_env -create -txn -home $testdir]
- error_check_good env_open [is_substr $e env] 1
-
- set txn1 [$e txn]
- error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
- error_check_good txn_id_set \
- [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
-
- set txn2 [$e txn]
- error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
-
- # txn3 will require a wraparound txnid
- # XXX How can we test it has a wrapped id?
- set txn3 [$e txn]
- error_check_good wrap_txn3 [is_valid_txn $txn3 $e] TRUE
-
- error_check_good free_txn1 [$txn1 commit] 0
- error_check_good free_txn2 [$txn2 commit] 0
- error_check_good free_txn3 [$txn3 commit] 0
-
- error_check_good close [$e close] 0
-}
-
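Note on txn004: the wraparound setup fits in a few lines. A minimal sketch,
assuming the txn_maxid global from the test harness and a clean $testdir:

    # Pin the ID allocator just below its ceiling; the allocator then
    # has to hand out a wrapped (recycled) transaction ID.
    set e [berkdb_env -create -txn -home $testdir]
    error_check_good txn_id_set \
        [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
    set t1 [$e txn]
    set t2 [$e txn]
    set t3 [$e txn]	;# past the ceiling: this ID must have wrapped
    foreach t [list $t1 $t2 $t3] {
        error_check_good commit [$t commit] 0
    }
    error_check_good close [$e close] 0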
diff --git a/storage/bdb/test/txn005.tcl b/storage/bdb/test/txn005.tcl
deleted file mode 100644
index e22581cd129..00000000000
--- a/storage/bdb/test/txn005.tcl
+++ /dev/null
@@ -1,75 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn005.tcl,v 11.37 2004/01/28 03:36:33 bostic Exp $
-#
-
-# TEST txn005
-# TEST Test transaction ID wraparound and recovery.
-proc txn005 {} {
- source ./include.tcl
- global txn_curid
- global txn_maxid
-
- env_cleanup $testdir
- puts "Txn005: Test transaction wraparound recovery"
-
- # Open/create the txn region
- puts "\tTxn005.a: Create environment"
- set e [berkdb_env -create -txn -home $testdir]
- error_check_good env_open [is_valid_env $e] TRUE
-
- set txn1 [$e txn]
- error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
-
- set db [berkdb_open -env $e -txn $txn1 -create -btree txn005.db]
- error_check_good db [is_valid_db $db] TRUE
- error_check_good txn1_commit [$txn1 commit] 0
-
- puts "\tTxn005.b: Set txn ids"
- error_check_good txn_id_set \
- [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
-
- # txn2 and txn3 will require a wraparound txnid
- set txn2 [$e txn]
- error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
-
- error_check_good put [$db put -txn $txn2 "a" ""] 0
- error_check_good txn2_commit [$txn2 commit] 0
-
- error_check_good get_a [$db get "a"] "{a {}}"
-
- error_check_good close [$db close] 0
-
- set txn3 [$e txn]
- error_check_good txn3 [is_valid_txn $txn3 $e] TRUE
-
- set db [berkdb_open -env $e -txn $txn3 -btree txn005.db]
- error_check_good db [is_valid_db $db] TRUE
-
- error_check_good put2 [$db put -txn $txn3 "b" ""] 0
- error_check_good sync [$db sync] 0
- error_check_good txn3_abort [$txn3 abort] 0
- error_check_good dbclose [$db close] 0
- error_check_good eclose [$e close] 0
-
- puts "\tTxn005.c: Run recovery"
- set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $result."
- }
-
- puts "\tTxn005.d: Check data"
- set e [berkdb_env -txn -home $testdir]
- error_check_good env_open [is_valid_env $e] TRUE
-
- set db [berkdb_open -env $e -auto_commit -btree txn005.db]
- error_check_good db [is_valid_db $db] TRUE
-
- error_check_good get_a [$db get "a"] "{a {}}"
- error_check_bad get_b [$db get "b"] "{b {}}"
- error_check_good dbclose [$db close] 0
- error_check_good eclose [$e close] 0
-}
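Note on txn005: the invariant asserted after recovery is simply that commits
survive an ID wrap and aborts do not. A minimal sketch of the check, assuming
db_recover in $util_path and the txn005.db contents created above:

    # -c requests catastrophic recovery, -e retains the environment.
    set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result]
    error_check_good recover $stat 0
    set e [berkdb_env -txn -home $testdir]
    set db [berkdb_open -env $e -auto_commit -btree txn005.db]
    error_check_good committed [$db get "a"] "{a {}}"
    error_check_bad aborted [$db get "b"] "{b {}}"
    error_check_good dbclose [$db close] 0
    error_check_good eclose [$e close] 0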
diff --git a/storage/bdb/test/txn006.tcl b/storage/bdb/test/txn006.tcl
deleted file mode 100644
index 14ada718e58..00000000000
--- a/storage/bdb/test/txn006.tcl
+++ /dev/null
@@ -1,47 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn006.tcl,v 1.7 2004/01/28 03:36:33 bostic Exp $
-#
-#
-# TEST txn006
-# TEST Test dump/load in transactional environment.
-proc txn006 { { iter 50 } } {
- source ./include.tcl
- set testfile txn006.db
-
- puts "Txn006: Test dump/load in transaction environment"
- env_cleanup $testdir
-
- puts "\tTxn006.a: Create environment and database"
- # Open/create the txn region
- set e [berkdb_env -create -home $testdir -txn]
- error_check_good env_open [is_valid_env $e] TRUE
-
- # Open/create database
- set db [berkdb_open -auto_commit -env $e \
- -create -btree -dup $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- # Start a transaction
- set txn [$e txn]
- error_check_good txn [is_valid_txn $txn $e] TRUE
-
- puts "\tTxn006.b: Put data"
- # Put some data
- for { set i 1 } { $i < $iter } { incr i } {
- error_check_good put [$db put -txn $txn key$i data$i] 0
- }
-
- # End transaction, close db
- error_check_good txn_commit [$txn commit] 0
- error_check_good db_close [$db close] 0
- error_check_good env_close [$e close] 0
-
- puts "\tTxn006.c: dump/load"
- # Dump and load
- exec $util_path/db_dump -p -h $testdir $testfile | \
- $util_path/db_load -h $testdir $testfile
-}
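Note on txn006: piping db_dump straight into db_load only proves the dump
parses. A slightly stronger sketch dumps twice and compares the dumps; the
file names t6.dump, txn006copy.db, and t6copy.dump are hypothetical:

    # Dump, load into a second database, dump that, and compare.
    exec $util_path/db_dump -p -h $testdir txn006.db > $testdir/t6.dump
    exec $util_path/db_load -f $testdir/t6.dump -h $testdir txn006copy.db
    exec $util_path/db_dump -p -h $testdir txn006copy.db > $testdir/t6copy.dump
    error_check_good dumps_match \
        [filecmp $testdir/t6.dump $testdir/t6copy.dump] 0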
diff --git a/storage/bdb/test/txn007.tcl b/storage/bdb/test/txn007.tcl
deleted file mode 100644
index 2ef382b9714..00000000000
--- a/storage/bdb/test/txn007.tcl
+++ /dev/null
@@ -1,57 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn007.tcl,v 11.5 2004/01/28 03:36:33 bostic Exp $
-#
-# TEST txn007
-# TEST Test of DB_TXN_WRITE_NOSYNC
-proc txn007 { { iter 50 } } {
- source ./include.tcl
- set testfile txn007.db
-
- puts "Txn007: DB_TXN_WRITE_NOSYNC"
- env_cleanup $testdir
-
- # Open/create the txn region
- puts "\tTxn007.a: Create env and database with -wrnosync"
- set e [berkdb_env -create -home $testdir -txn -wrnosync]
- error_check_good env_open [is_valid_env $e] TRUE
-
- # Open/create database
- set db [berkdb open -auto_commit -env $e \
- -create -btree -dup $testfile]
- error_check_good db_open [is_valid_db $db] TRUE
-
- # Put some data
- puts "\tTxn007.b: Put $iter data items in individual transactions"
- for { set i 1 } { $i < $iter } { incr i } {
- # Start a transaction
- set txn [$e txn]
- error_check_good txn [is_valid_txn $txn $e] TRUE
- $db put -txn $txn key$i data$i
- error_check_good txn_commit [$txn commit] 0
- }
- set stat [$e log_stat]
- puts "\tTxn007.c: Check log stats"
- foreach i $stat {
- set txt [lindex $i 0]
- if { [string equal $txt {Times log written}] == 1 } {
- set wrval [lindex $i 1]
- }
- if { [string equal $txt {Times log flushed}] == 1 } {
- set syncval [lindex $i 1]
- }
- }
- error_check_good wrval [expr $wrval >= $iter] 1
- #
- # We should have written at least 'iter' number of times,
- # but not synced on any of those.
- #
- set val [expr $wrval - $iter]
- error_check_good syncval [expr $syncval <= $val] 1
-
- error_check_good db_close [$db close] 0
- error_check_good env_close [$e close] 0
-}
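Note on txn007: the final check encodes the DB_TXN_WRITE_NOSYNC contract as
arithmetic: every commit writes the log, so writes >= iter, but those writes
need not flush, so flushes <= writes - iter. A small helper sketch for pulling
one counter out of the stat list; log_stat_val is a hypothetical name:

    # Return the value for a named statistic from [$env log_stat].
    proc log_stat_val { env name } {
        foreach pair [$env log_stat] {
            if { [string equal [lindex $pair 0] $name] } {
                return [lindex $pair 1]
            }
        }
        return -1	;# sentinel: statistic not found
    }
    # Usage: set wrval [log_stat_val $e {Times log written}]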
diff --git a/storage/bdb/test/txn008.tcl b/storage/bdb/test/txn008.tcl
deleted file mode 100644
index 8c89296f449..00000000000
--- a/storage/bdb/test/txn008.tcl
+++ /dev/null
@@ -1,32 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn008.tcl,v 11.6 2004/01/28 03:36:33 bostic Exp $
-#
-
-# TEST txn008
-# TEST Test of wraparound txnids (txn002)
-proc txn008 { } {
- source ./include.tcl
- global txn_curid
- global txn_maxid
-
- set orig_curid $txn_curid
- set orig_maxid $txn_maxid
- puts "\tTxn008.1: wraparound txnids"
- set txn_curid [expr $txn_maxid - 2]
- txn002 "008.1"
- puts "\tTxn008.2: closer wraparound txnids"
- set txn_curid [expr $txn_maxid - 3]
- set txn_maxid [expr $txn_maxid - 2]
- txn002 "008.2"
-
- puts "\tTxn008.3: test wraparound txnids"
- txn_idwrap_check $testdir
- set txn_curid $orig_curid
- set txn_maxid $orig_maxid
- return
-}
-
diff --git a/storage/bdb/test/txn009.tcl b/storage/bdb/test/txn009.tcl
deleted file mode 100644
index b45538d7545..00000000000
--- a/storage/bdb/test/txn009.tcl
+++ /dev/null
@@ -1,32 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn009.tcl,v 11.6 2004/01/28 03:36:33 bostic Exp $
-#
-
-# TEST txn009
-# TEST Test of wraparound txnids (txn003)
-proc txn009 { } {
- source ./include.tcl
- global txn_curid
- global txn_maxid
-
- set orig_curid $txn_curid
- set orig_maxid $txn_maxid
- puts "\tTxn009.1: wraparound txnids"
- set txn_curid [expr $txn_maxid - 2]
- txn003 "009.1"
- puts "\tTxn009.2: closer wraparound txnids"
- set txn_curid [expr $txn_maxid - 3]
- set txn_maxid [expr $txn_maxid - 2]
- txn003 "009.2"
-
- puts "\tTxn009.3: test wraparound txnids"
- txn_idwrap_check $testdir
- set txn_curid $orig_curid
- set txn_maxid $orig_maxid
- return
-}
-
diff --git a/storage/bdb/test/txnscript.tcl b/storage/bdb/test/txnscript.tcl
deleted file mode 100644
index 980f6ed5118..00000000000
--- a/storage/bdb/test/txnscript.tcl
+++ /dev/null
@@ -1,67 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txnscript.tcl,v 11.5 2004/01/28 03:36:33 bostic Exp $
-#
-# Txn003 script - outstanding child prepare script
-# Usage: txnscript envcmd dbfile gidfile key data
-# envcmd: command to open env
-# dbfile: name of database file
-# gidfile: name of global id file
-# key: key to use
-# data: new data to use
-
-source ./include.tcl
-source $test_path/test.tcl
-source $test_path/testutils.tcl
-
-set usage "txnscript envcmd dbfile gidfile key data"
-
-# Verify usage
-if { $argc != 5 } {
- puts stderr "FAIL:[timestamp] Usage: $usage"
- exit
-}
-
-# Initialize arguments
-set envcmd [ lindex $argv 0 ]
-set dbfile [ lindex $argv 1 ]
-set gidfile [ lindex $argv 2 ]
-set key [ lindex $argv 3 ]
-set data [ lindex $argv 4 ]
-
-set dbenv [eval $envcmd]
-error_check_good envopen [is_valid_env $dbenv] TRUE
-
-set usedb 1
-set db [berkdb_open -auto_commit -env $dbenv $dbfile]
-error_check_good dbopen [is_valid_db $db] TRUE
-
-puts "\tTxnscript.a: begin parent and child txn"
-set parent [$dbenv txn]
-error_check_good parent [is_valid_txn $parent $dbenv] TRUE
-set child [$dbenv txn -parent $parent]
-error_check_good child [is_valid_txn $child $dbenv] TRUE
-
-puts "\tTxnscript.b: Modify data"
-error_check_good db_put [$db put -txn $child $key $data] 0
-
-set gfd [open $gidfile w+]
-set gid [make_gid txnscript:$parent]
-puts $gfd $gid
-puts "\tTxnscript.c: Prepare parent only"
-error_check_good txn_prepare:$parent [$parent prepare $gid] 0
-close $gfd
-
-puts "\tTxnscript.d: Check child handle"
-set stat [catch {$child abort} ret]
-error_check_good child_handle $stat 1
-error_check_good child_h2 [is_substr $ret "invalid command name"] 1
-
-#
-# We do not close the db or env, but exit with the txns outstanding.
-#
-puts "\tTxnscript completed successfully"
-flush stdout
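Note on txnscript: it exits with the prepared parent outstanding on purpose;
the caller (txn003_body above) reopens the environment with -recover and
resolves it. A minimal sketch of that resolution, assuming $envcmd and the
gid written to the gid file as above:

    # After the child process exits, recover and resolve whatever
    # prepared transactions txn_recover reports.
    set env [eval $envcmd "-recover"]
    foreach tpair [$env txn_recover] {
        set txn [lindex $tpair 0]
        set gid [lindex $tpair 1]
        # Match $gid against the saved gid, then commit (or abort).
        error_check_good resolve [$txn commit] 0
    }
    error_check_good envclose [$env close] 0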
diff --git a/storage/bdb/test/update.tcl b/storage/bdb/test/update.tcl
deleted file mode 100644
index 85c1a80071f..00000000000
--- a/storage/bdb/test/update.tcl
+++ /dev/null
@@ -1,93 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: update.tcl,v 11.13 2004/01/28 03:36:33 bostic Exp $
-
-source ./include.tcl
-global update_dir
-set update_dir "$test_path/update_test"
-
-proc update { } {
- source ./include.tcl
- global update_dir
-
- foreach version [glob $update_dir/*] {
- regexp \[^\/\]*$ $version version
- foreach method [glob $update_dir/$version/*] {
- regexp \[^\/\]*$ $method method
- foreach file [glob $update_dir/$version/$method/*] {
- regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
- foreach endianness {"le" "be"} {
- puts "Update:\
- $version $method $name $endianness"
- set ret [catch {_update $update_dir $testdir $version $method $name $endianness 1 1} message]
- if { $ret != 0 } {
- puts $message
- }
- }
- }
- }
- }
-}
-
-proc _update { source_dir temp_dir \
- version method file endianness do_db_load_test do_update_test } {
- source include.tcl
- global errorInfo
-
- cleanup $temp_dir NULL
-
- exec sh -c \
-"gzcat $source_dir/$version/$method/$file.tar.gz | (cd $temp_dir && tar xf -)"
-
- if { $do_db_load_test } {
- set ret [catch \
- {exec $util_path/db_load -f "$temp_dir/$file.dump" \
- "$temp_dir/update.db"} message]
- error_check_good \
- "Update load: $version $method $file $message" $ret 0
-
- set ret [catch \
- {exec $util_path/db_dump -f "$temp_dir/update.dump" \
- "$temp_dir/update.db"} message]
- error_check_good \
- "Update dump: $version $method $file $message" $ret 0
-
- error_check_good "Update diff.1.1: $version $method $file" \
- [filecmp "$temp_dir/$file.dump" "$temp_dir/update.dump"] 0
- error_check_good \
- "Update diff.1.2: $version $method $file" $ret ""
- }
-
- if { $do_update_test } {
- set ret [catch \
- {berkdb open -update "$temp_dir/$file-$endianness.db"} db]
- if { $ret == 1 } {
- if { ![is_substr $errorInfo "version upgrade"] } {
- set fnl [string first "\n" $errorInfo]
- set theError \
- [string range $errorInfo 0 [expr $fnl - 1]]
- error $theError
- }
- } else {
- error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good dbclose [$db close] 0
-
- set ret [catch \
- {exec $util_path/db_dump -f \
- "$temp_dir/update.dump" \
- "$temp_dir/$file-$endianness.db"} message]
- error_check_good "Update\
- dump: $version $method $file $message" $ret 0
-
- error_check_good \
- "Update diff.2: $version $method $file" \
- [filecmp "$temp_dir/$file.dump" \
- "$temp_dir/update.dump"] 0
- error_check_good \
- "Update diff.2: $version $method $file" $ret ""
- }
- }
-}
diff --git a/storage/bdb/test/upgrade.tcl b/storage/bdb/test/upgrade.tcl
deleted file mode 100644
index 0043c353afc..00000000000
--- a/storage/bdb/test/upgrade.tcl
+++ /dev/null
@@ -1,745 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1999-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: upgrade.tcl,v 11.37 2004/10/27 20:29:29 carol Exp $
-
-source ./include.tcl
-
-global upgrade_dir
-# set upgrade_dir "$test_path/upgrade_test"
-set upgrade_dir "$test_path/upgrade/databases"
-
-global gen_upgrade
-set gen_upgrade 0
-global gen_chksum
-set gen_chksum 0
-global gen_upgrade_log
-set gen_upgrade_log 0
-
-global upgrade_dir
-global upgrade_be
-global upgrade_method
-global upgrade_name
-
-proc upgrade { { archived_test_loc "DEFAULT" } } {
- source ./include.tcl
- global upgrade_dir
- global tcl_platform
- global saved_logvers
-
- set saved_upgrade_dir $upgrade_dir
-
- # Identify endianness of the machine running upgrade.
- if { [big_endian] == 1 } {
- set myendianness be
- } else {
- set myendianness le
- }
- set e $tcl_platform(byteOrder)
-
- if { [file exists $archived_test_loc/logversion] == 1 } {
- set fd [open $archived_test_loc/logversion r]
- set saved_logvers [read $fd]
- close $fd
- } else {
- puts "Old log version number must be available \
- in $archived_test_loc/logversion"
- return
- }
-
- fileremove -f UPGRADE.OUT
- set o [open UPGRADE.OUT a]
-
- puts -nonewline $o "Upgrade test started at: "
- puts $o [clock format [clock seconds] -format "%H:%M %D"]
- puts $o [berkdb version -string]
- puts $o "Testing $e files"
-
- puts -nonewline "Upgrade test started at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
- puts [berkdb version -string]
- puts "Testing $e files"
-
- if { $archived_test_loc == "DEFAULT" } {
- puts $o "Using default archived databases in $upgrade_dir."
- puts "Using default archived databases in $upgrade_dir."
- } else {
- set upgrade_dir $archived_test_loc
- puts $o "Using archived databases in $upgrade_dir."
- puts "Using archived databases in $upgrade_dir."
- }
- close $o
-
- foreach version [glob $upgrade_dir/*] {
- if { [string first CVS $version] != -1 } { continue }
- regexp \[^\/\]*$ $version version
-
- # Test only files where the endianness of the db matches
- # the endianness of the test platform. These are the
- # meaningful tests:
- # 1. File generated on le, tested on le
- # 2. File generated on be, tested on be
- # 3. Byte-swapped file generated on le, tested on be
- # 4. Byte-swapped file generated on be, tested on le
- #
- set dbendianness [string range $version end-1 end]
- if { [string compare $myendianness $dbendianness] != 0 } {
- puts "Skipping test of $version \
- on $myendianness platform."
- } else {
- set release [string trim $version -lbe]
- set o [open UPGRADE.OUT a]
- puts $o "Files created on release $release"
- close $o
- puts "Files created on release $release"
-
- foreach method [glob $upgrade_dir/$version/*] {
- regexp \[^\/\]*$ $method method
- set o [open UPGRADE.OUT a]
- puts $o "\nTesting $method files"
- close $o
- puts "\tTesting $method files"
-
- foreach file [lsort -dictionary \
- [glob -nocomplain \
- $upgrade_dir/$version/$method/*]] {
- regexp (\[^\/\]*)\.tar\.gz$ \
- $file dummy name
-
- cleanup $testdir NULL 1
- set curdir [pwd]
- cd $testdir
- set tarfd [open "|tar xf -" w]
- cd $curdir
-
- catch {exec gunzip -c \
- "$upgrade_dir/$version/$method/$name.tar.gz" \
- >@$tarfd}
- close $tarfd
-
- set f [open $testdir/$name.tcldump \
- {RDWR CREAT}]
- close $f
-
-				# We exec a separate tclsh for each
-				# subtest to keep the testing process
-				# from consuming a tremendous amount
-				# of memory.
- #
- # First we test the .db files.
- if { [file exists \
- $testdir/$name-$myendianness.db] } {
- if { [catch {exec $tclsh_path \
- << "source \
- $test_path/test.tcl;\
- _upgrade_test $testdir \
- $version $method $name \
- $myendianness" >>& \
- UPGRADE.OUT } message] } {
- set o [open \
- UPGRADE.OUT a]
- puts $o "FAIL: $message"
- close $o
- }
- if { [catch {exec $tclsh_path\
- << "source \
- $test_path/test.tcl;\
- _db_load_test $testdir \
- $version $method $name" >>&\
- UPGRADE.OUT } message] } {
- set o [open \
- UPGRADE.OUT a]
- puts $o "FAIL: $message"
- close $o
- }
- }
- # Then we test log files.
- if { [file exists \
- $testdir/$name.prlog] } {
- if { [catch {exec $tclsh_path \
- << "source \
- $test_path/test.tcl;\
- global saved_logvers;\
- set saved_logvers \
- $saved_logvers;\
- _log_test $testdir \
- $release $method \
- $name" >>& \
- UPGRADE.OUT } message] } {
- set o [open \
- UPGRADE.OUT a]
- puts $o "FAIL: $message"
- close $o
- }
- }
- }
- }
- }
- }
- set upgrade_dir $saved_upgrade_dir
-
- set o [open UPGRADE.OUT a]
- puts -nonewline $o "Completed at: "
- puts $o [clock format [clock seconds] -format "%H:%M %D"]
- close $o
-
- puts -nonewline "Completed at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
-
- # Don't provide a return value.
- return
-}
-
-proc _upgrade_test { temp_dir version method file endianness } {
- source include.tcl
- global errorInfo
- global encrypt
-
- puts "Upgrade: $version $method $file $endianness"
-
- # Check whether we're working with an encrypted file.
- if { [string match c-* $file] } {
- set encrypt 1
- }
- set ret [berkdb upgrade "$temp_dir/$file-$endianness.db"]
- error_check_good dbupgrade $ret 0
-
- error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0
-
- upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
-
- error_check_good "Upgrade diff.$endianness: $version $method $file" \
- [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
-}
-
-proc _db_load_test { temp_dir version method file } {
- source include.tcl
- global errorInfo
-
- puts "Db_load: $version $method $file"
-
- set ret [catch \
- {exec $util_path/db_load -f "$temp_dir/$file.dump" \
- "$temp_dir/upgrade.db"} message]
- error_check_good \
- "Upgrade load: $version $method $file $message" $ret 0
-
- upgrade_dump "$temp_dir/upgrade.db" "$temp_dir/temp.dump"
-
- error_check_good "Upgrade diff.1.1: $version $method $file" \
- [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
-}
-
-proc _log_test { temp_dir release method file } {
- source ./include.tcl
- global saved_logvers
- global passwd
- puts "Check log file: $temp_dir $release $method $file"
-
- # Get log version number of current system
- set env [berkdb_env -create -log -home $testdir]
- error_check_good is_valid_env [is_valid_env $env] TRUE
- set current_logvers [get_log_vers $env]
- error_check_good env_close [$env close] 0
- error_check_good env_remove [berkdb envremove -home $testdir] 0
-
- # Rename recd001-x-log.000000000n to log.000000000n.
- set logfiles [glob -nocomplain $temp_dir/*log.0*]
- foreach logfile $logfiles {
- set logname [string replace $logfile 0 \
- [string last - $logfile]]
- file rename -force $logfile $temp_dir/$logname
- }
-
- # Use db_printlog to dump the logs. If the current log file
- # version is greater than the saved log file version, the log
- # files are expected to be unreadable. If the log file is
- # readable, check that the current printlog dump matches the
- # archived printlog.
- #
- set ret [catch {exec $util_path/db_printlog -h $temp_dir \
- > $temp_dir/logs.prlog} message]
- if { [is_substr $message "magic number"] } {
- # The failure is probably due to encryption, try
- # crypto printlog.
- set ret [catch {exec $util_path/db_printlog -h $temp_dir \
- -P $passwd > $temp_dir/logs.prlog} message]
- if { $ret == 1 } {
- # If the failure is because of a historic
- # log version, that's okay.
- if { $current_logvers <= $saved_logvers } {
- puts "db_printlog failed: $message"
- }
- }
- }
-
- if { $current_logvers > $saved_logvers } {
- error_check_good historic_log_version \
- [is_substr $message "historic log version"] 1
- } else {
- error_check_good db_printlog:$message $ret 0
- # Compare logs.prlog and $file.prlog (should match)
- error_check_good "Compare printlogs" [filecmp \
- "$temp_dir/logs.prlog" "$temp_dir/$file.prlog"] 0
- }
-}
-
-proc gen_upgrade { dir { save_crypto 1 } { save_non_crypto 1 } } {
- global gen_upgrade
- global gen_upgrade_log
- global gen_chksum
- global upgrade_dir
- global upgrade_be
- global upgrade_method
- global upgrade_name
- global test_names
- global parms
- global encrypt
- global passwd
- source ./include.tcl
-
- set upgrade_dir $dir
- env_cleanup $testdir
-
- fileremove -f GENERATE.OUT
- set o [open GENERATE.OUT a]
-
- puts -nonewline $o "Generating upgrade files. Started at: "
- puts $o [clock format [clock seconds] -format "%H:%M %D"]
- puts $o [berkdb version -string]
-
- puts -nonewline "Generating upgrade files. Started at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
- puts [berkdb version -string]
-
- close $o
-
- # Create a file that contains the log version number.
- # If necessary, create the directory to contain the file.
- set env [berkdb_env -create -log -home $testdir]
- error_check_good is_valid_env [is_valid_env $env] TRUE
-
- if { [file exists $dir] == 0 } {
- file mkdir $dir
- }
- set lv [open $dir/logversion w]
- puts $lv [get_log_vers $env]
- close $lv
-
- error_check_good env_close [$env close] 0
-
- # Generate test databases for each access method and endianness.
- set gen_upgrade 1
- foreach method \
- "btree rbtree hash recno rrecno frecno queue queueext" {
- set o [open GENERATE.OUT a]
- puts $o "\nGenerating $method files"
- close $o
- puts "\tGenerating $method files"
- set upgrade_method $method
-#set test_names(test) ""
- foreach test $test_names(test) {
- if { [info exists parms($test)] != 1 } {
- continue
- }
-
- set o [open GENERATE.OUT a]
- puts $o "\t\tGenerating files for $test"
- close $o
- puts "\t\tGenerating files for $test"
-
- if { $save_non_crypto == 1 } {
- set encrypt 0
- foreach upgrade_be { 0 1 } {
- set upgrade_name $test
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl;\
- global gen_upgrade upgrade_be;\
- global upgrade_method upgrade_name;\
- global encrypt;\
- set encrypt $encrypt;\
- set gen_upgrade 1;\
- set upgrade_be $upgrade_be;\
- set upgrade_method $upgrade_method;\
- set upgrade_name $upgrade_name;\
- run_method -$method $test" \
- >>& GENERATE.OUT} res] {
- puts "FAIL: run_method \
- $test $method"
- }
- cleanup $testdir NULL 1
- }
- # Save checksummed files for only one test.
-			# Checksumming should work either in all cases or in none.
- set gen_chksum 1
- foreach upgrade_be { 0 1 } {
- set upgrade_name $test
- if { $test == "test001" } {
- if { [catch {exec $tclsh_path \
- << "source $test_path/test.tcl;\
- global gen_upgrade;\
- global upgrade_be;\
- global upgrade_method;\
- global upgrade_name;\
- global encrypt gen_chksum;\
- set encrypt $encrypt;\
- set gen_upgrade 1;\
- set gen_chksum 1;\
- set upgrade_be $upgrade_be;\
- set upgrade_method \
- $upgrade_method;\
- set upgrade_name \
- $upgrade_name;\
- run_method -$method $test \
- 0 1 stdout -chksum" \
- >>& GENERATE.OUT} res] } {
- puts "FAIL: run_method \
- $test $method \
- -chksum: $res"
- }
- cleanup $testdir NULL 1
- }
- }
- set gen_chksum 0
- }
- # Save encrypted db's only of native endianness.
- # Encrypted files are not portable across endianness.
- if { $save_crypto == 1 } {
- set upgrade_be [big_endian]
- set encrypt 1
- set upgrade_name $test
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl;\
- global gen_upgrade upgrade_be;\
- global upgrade_method upgrade_name;\
- global encrypt passwd;\
- set encrypt $encrypt;\
- set passwd $passwd;\
- set gen_upgrade 1;\
- set upgrade_be $upgrade_be;\
- set upgrade_method $upgrade_method;\
- set upgrade_name $upgrade_name;\
- run_secmethod $method $test" \
- >>& GENERATE.OUT} res] {
- puts "FAIL: run_secmethod \
- $test $method"
- }
- cleanup $testdir NULL 1
- }
- }
- }
- set gen_upgrade 0
- # Set upgrade_be to the native value so log files go to the
- # right place.
- set upgrade_be [big_endian]
-
- # Generate log files.
- set o [open GENERATE.OUT a]
- puts $o "\tGenerating log files"
- close $o
- puts "\tGenerating log files"
-
- set gen_upgrade_log 1
- # Pass the global variables and their values to the new tclsh.
- if { $save_non_crypto == 1 } {
- set encrypt 0
- if [catch {exec $tclsh_path << "source $test_path/test.tcl;\
- global gen_upgrade_log upgrade_be upgrade_dir;\
- global encrypt;\
- set encrypt $encrypt;\
- set gen_upgrade_log $gen_upgrade_log; \
- set upgrade_be $upgrade_be;\
- set upgrade_dir $upgrade_dir;\
- run_recds" >>& GENERATE.OUT} res] {
- puts "FAIL: run_recds: $res"
- }
- }
- if { $save_crypto == 1 } {
- set encrypt 1
- if [catch {exec $tclsh_path << "source $test_path/test.tcl;\
- global gen_upgrade_log upgrade_be upgrade_dir;\
- global encrypt;\
- set encrypt $encrypt;\
- set gen_upgrade_log $gen_upgrade_log; \
- set upgrade_be $upgrade_be;\
- set upgrade_dir $upgrade_dir;\
- run_recds " >>& GENERATE.OUT} res] {
- puts "FAIL: run_recds with crypto: $res"
- }
- }
- set gen_upgrade_log 0
-
- set o [open GENERATE.OUT a]
- puts -nonewline $o "Completed at: "
- puts $o [clock format [clock seconds] -format "%H:%M %D"]
- puts -nonewline "Completed at: "
- puts [clock format [clock seconds] -format "%H:%M %D"]
- close $o
-}
-
-proc save_upgrade_files { dir } {
- global upgrade_dir
- global upgrade_be
- global upgrade_method
- global upgrade_name
- global gen_upgrade
- global gen_upgrade_log
- global encrypt
- global gen_chksum
- global passwd
- source ./include.tcl
-
- set vers [berkdb version]
- set maj [lindex $vers 0]
- set min [lindex $vers 1]
-
- # Is this machine big or little endian? We want to mark
- # the test directories appropriately, since testing
- # little-endian databases generated by a big-endian machine,
- # and/or vice versa, is interesting.
- if { [big_endian] } {
- set myendianness be
- } else {
- set myendianness le
- }
-
- if { $upgrade_be == 1 } {
- set version_dir "$myendianness-$maj.${min}be"
- set en be
- } else {
- set version_dir "$myendianness-$maj.${min}le"
- set en le
- }
-
- set dest $upgrade_dir/$version_dir/$upgrade_method
- exec mkdir -p $dest
-
- if { $gen_upgrade == 1 } {
- # Save db files from test001 - testxxx.
- set dbfiles [glob -nocomplain $dir/*.db]
- set dumpflag ""
- # Encrypted files are identified by the prefix "c-".
- if { $encrypt == 1 } {
- set upgrade_name c-$upgrade_name
- set dumpflag " -P $passwd "
- }
- # Checksummed files are identified by the prefix "s-".
- if { $gen_chksum == 1 } {
- set upgrade_name s-$upgrade_name
- }
- foreach dbfile $dbfiles {
- set basename [string range $dbfile \
- [expr [string length $dir] + 1] end-3]
-
- set newbasename $upgrade_name-$basename
-
- # db_dump file
- if { [catch {eval exec $util_path/db_dump -k $dumpflag \
- $dbfile > $dir/$newbasename.dump} res] } {
- puts "FAIL: $res"
- }
-
- # tcl_dump file
- upgrade_dump $dbfile $dir/$newbasename.tcldump
-
- # Rename dbfile and any dbq files.
- file rename $dbfile $dir/$newbasename-$en.db
- foreach dbq \
- [glob -nocomplain $dir/__dbq.$basename.db.*] {
- set s [string length $dir/__dbq.]
- set newname [string replace $dbq $s \
- [expr [string length $basename] + $s - 1] \
- $newbasename-$en]
- file rename $dbq $newname
- }
- set cwd [pwd]
- cd $dir
- catch {eval exec tar -cvf $dest/$newbasename.tar \
- [glob $newbasename* __dbq.$newbasename-$en.db.*]}
- catch {exec gzip -9v $dest/$newbasename.tar} res
- cd $cwd
- }
- }
-
- if { $gen_upgrade_log == 1 } {
- # Save log files from recd tests.
- set logfiles [glob -nocomplain $dir/log.*]
- if { [llength $logfiles] > 0 } {
- # More than one log.0000000001 file may be produced
- # per recd test, so we generate unique names:
- # recd001-0-log.0000000001, recd001-1-log.0000000001,
- # and so on.
- # We may also have log.0000000001, log.0000000002,
- # and so on, and they will all be dumped together
- # by db_printlog.
- set count 0
- while { [file exists \
- $dest/$upgrade_name-$count-log.tar.gz] \
- == 1 } {
- incr count
- }
- set newname $upgrade_name-$count-log
-
- # Run db_printlog on all the log files
- if {[catch {exec $util_path/db_printlog -h $dir > \
- $dir/$newname.prlog} res] != 0} {
- puts "Regular printlog failed, try encryption"
- eval {exec $util_path/db_printlog} -h $dir \
- -P $passwd > $dir/$newname.prlog
- }
-
- # Rename each log file so we can identify which
- # recd test created it.
- foreach logfile $logfiles {
- set lognum [string range $logfile \
- end-9 end]
- file rename $logfile $dir/$newname.$lognum
- }
-
- set cwd [pwd]
- cd $dir
-
- catch {eval exec tar -cvf $dest/$newname.tar \
- [glob $newname*]}
- catch {exec gzip -9v $dest/$newname.tar}
- cd $cwd
- }
- }
-}
-
-proc upgrade_dump { database file {stripnulls 0} } {
- global errorInfo
- global encrypt
- global passwd
-
- set encargs ""
- if { $encrypt == 1 } {
- set encargs " -encryptany $passwd "
- }
- set db [eval {berkdb open} -rdonly $encargs $database]
- set dbc [$db cursor]
-
- set f [open $file w+]
- fconfigure $f -encoding binary -translation binary
-
- #
- # Get a sorted list of keys
- #
- set key_list ""
- set pair [$dbc get -first]
-
- while { 1 } {
- if { [llength $pair] == 0 } {
- break
- }
- set k [lindex [lindex $pair 0] 0]
- lappend key_list $k
- set pair [$dbc get -next]
- }
-
- # Discard duplicated keys; we now have a key for each
- # duplicate, not each unique key, and we don't want to get each
- # duplicate multiple times when we iterate over key_list.
- set uniq_keys ""
- foreach key $key_list {
- if { [info exists existence_list($key)] == 0 } {
- lappend uniq_keys $key
- }
- set existence_list($key) 1
- }
- set key_list $uniq_keys
-
- set key_list [lsort -command _comp $key_list]
-
- #
- # Get the data for each key
- #
- set i 0
- foreach key $key_list {
- set pair [$dbc get -set $key]
- if { $stripnulls != 0 } {
- # the Tcl interface to db versions before 3.X
- # added nulls at the end of all keys and data, so
- # we provide functionality to strip that out.
- set key [strip_null $key]
- }
- set data_list {}
- catch { while { [llength $pair] != 0 } {
- set data [lindex [lindex $pair 0] 1]
- if { $stripnulls != 0 } {
- set data [strip_null $data]
- }
- lappend data_list [list $data]
- set pair [$dbc get -nextdup]
- } }
- #lsort -command _comp data_list
- set data_list [lsort -command _comp $data_list]
- puts -nonewline $f [binary format i [string length $key]]
- puts -nonewline $f $key
- puts -nonewline $f [binary format i [llength $data_list]]
- for { set j 0 } { $j < [llength $data_list] } { incr j } {
- puts -nonewline $f [binary format i [string length \
- [concat [lindex $data_list $j]]]]
- puts -nonewline $f [concat [lindex $data_list $j]]
- }
- if { [llength $data_list] == 0 } {
- puts "WARNING: zero-length data list"
- }
- incr i
- }
-
- close $f
- error_check_good upgrade_dump_c_close [$dbc close] 0
- error_check_good upgrade_dump_db_close [$db close] 0
-}
-
-proc _comp { a b } {
- if { 0 } {
- # XXX
- set a [strip_null [concat $a]]
- set b [strip_null [concat $b]]
- #return [expr [concat $a] < [concat $b]]
- } else {
- set an [string first "\0" $a]
- set bn [string first "\0" $b]
-
- if { $an != -1 } {
- set a [string range $a 0 [expr $an - 1]]
- }
- if { $bn != -1 } {
- set b [string range $b 0 [expr $bn - 1]]
- }
- }
- #puts "$a $b"
- return [string compare $a $b]
-}
-
-proc strip_null { str } {
- set len [string length $str]
- set last [expr $len - 1]
-
- set termchar [string range $str $last $last]
- if { [string compare $termchar \0] == 0 } {
- set ret [string range $str 0 [expr $last - 1]]
- } else {
- set ret $str
- }
-
- return $ret
-}
-
-proc get_log_vers { env } {
- set stat [$env log_stat]
- foreach pair $stat {
- set msg [lindex $pair 0]
- set val [lindex $pair 1]
- if { $msg == "Log file Version" } {
- return $val
- }
- }
- puts "FAIL: Log file Version not found in log_stat"
- return 0
-}
-
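Note on upgrade_dump: it writes a simple length-prefixed binary format per
key: a 4-byte native-order length, the key bytes, a 4-byte duplicate count,
then each datum as a 4-byte length plus its bytes. A minimal reader sketch
for that format, assuming the reader runs with the same byte order as the
writer (binary format i is native):

    # Walk the records in a file produced by upgrade_dump.
    set f [open $file r]
    fconfigure $f -encoding binary -translation binary
    while { [binary scan [read $f 4] i klen] == 1 } {
        set key [read $f $klen]
        binary scan [read $f 4] i ndups
        for { set j 0 } { $j < $ndups } { incr j } {
            binary scan [read $f 4] i dlen
            set data [read $f $dlen]
            puts "$key -> $data"
        }
    }
    close $f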
diff --git a/storage/bdb/test/wordlist b/storage/bdb/test/wordlist
deleted file mode 100644
index 03ea15f7277..00000000000
--- a/storage/bdb/test/wordlist
+++ /dev/null
@@ -1,10001 +0,0 @@
-cooperate
-benighted
-apologist's
-addresser
-cataract
-colonially
-atoned
-avow
-bathroom
-anaesthesia
-columnated
-bogs
-astral
-barbed
-captives
-acclaims
-adjutants
-affidavits
-baptisms
-bubbling
-classic
-allaying
-component
-battlement
-backtrack
-
-courage
-bore
-advertisement
-attests
-bunny's
-airlifts
-cajole
-cataloging
-airily
-collected
-abridged
-compel
-aftermath
-barrow
-approve
-chillier
-bequest
-attendant
-abjures
-adjudication
-banished
-asymptotes
-borrower
-caustic
-claim
-cohabitation
-corporacies
-buoy
-benchmark's
-averting
-anecdote's
-caress
-annihilate
-cajoles
-anywhere
-apparitions
-coves
-bribed
-casually
-clue's
-asserted
-architects
-abstained
-attitude
-accumulating
-coalesced
-angelic
-agnostic
-breathed
-bother
-congregating
-amatory
-caging
-countryside
-chapel
-buttonhole
-bartenders
-bridging
-bombardment
-accurately
-confirmed
-alleviated
-acquiring
-bruise
-antelope
-albums
-allusive
-corker
-cavity's
-compliment
-climb
-caterpillar
-almond
-authenticated
-balkan
-assembly's
-acidity
-abases
-bonny
-been
-abbots
-abductor's
-aerials
-cancels
-chalked
-beeps
-affirms
-contrariness
-clearest
-appropriations
-critiquing
-affluence
-bouts
-abiding
-comprises
-brunches
-biology
-conceptualization's
-assaying
-abutter
-adorable
-beatable
-appenders
-aggressors
-agrarian
-bottleneck
-angled
-beholds
-bereaved
-creation
-animated
-candied
-bar
-aeronautics
-cousin's
-cleaver
-alienation
-billet
-bungler
-contention
-businessman
-braids
-assert
-boisterous
-consolidate
-breathing
-ballot
-averted
-conscientiously
-bellow
-brazenness
-coaches
-bulldog
-classify
-checksum
-almond's
-cornered
-caskets
-capacitors
-beefer
-connoisseurs
-consisted
-adore
-circumvented
-colonels
-addenda
-boost
-compatibility's
-bumblebee
-commonest
-containment
-active
-absorption's
-creaks
-administer
-beset
-aborted
-aforesaid
-aridity
-broken
-azimuths
-aerial
-addition's
-aggrieve
-anthology
-circuitous
-checks
-alley's
-beam
-boss
-corrupting
-absolutes
-asteroid's
-bandstands
-beatitude's
-analogue's
-busts
-confession
-bedstead
-affairs
-blackmailers
-collared
-buckboard
-assassin
-accessor
-adjudging
-binders
-constituent's
-blister
-aromas
-approved
-absorbent
-barbarously
-cat's
-builder
-brandish
-assailing
-constitute
-christening
-acutely
-amount
-blurry
-blocks
-advertise
-chain
-brigade's
-confusion
-beds
-arrangers
-colonizers
-beautifying
-bankruptcy
-bedazzles
-candidates
-clearness
-admonishment's
-behind
-abbreviations
-basting
-ballasts
-amateurism
-celled
-constituted
-bonfire
-bugled
-advisee's
-battled
-budded
-burners
-causeway's
-calibrate
-brambly
-befuddles
-azure
-busiest
-admiringly
-appropriator
-accumulator
-cables
-abhor
-civil
-botulinus
-creaked
-bismuth
-astronomical
-abscissas
-bodice
-aunt
-cascades
-cares
-comradeship
-assemblages
-boater
-bellmen
-admission's
-ambitious
-baldness
-abortive
-controlled
-chinked
-coded
-courtrooms
-arteriolar
-cooler's
-cared
-brewer
-christians
-barbecues
-contacts
-blackjack's
-buzzing
-blasters
-accords
-braziers
-allegretto
-catered
-breveting
-cleaning
-amicably
-bummed
-consulted
-allegro's
-accumulator's
-compartmented
-condemned
-concludes
-bitwise
-cheered
-appropriator's
-accessors
-casting
-carolina's
-accompanying
-budding
-correspond
-bach's
-angel's
-bearing
-arresters
-biweekly
-character
-badgering
-cantankerous
-avalanching
-adjudges
-barometer
-append
-continuations
-burped
-boxtop's
-abstention
-amp
-axiomatized
-bimonthlies
-aghast
-arresting
-breakwater's
-continuing
-bridle
-bobbin's
-antagonistically
-blindly
-biochemical
-biologically
-antifundamentalist
-confer
-cloudiness
-bonded
-comfortingly
-caption
-blackmailed
-bidders
-breakpoint
-brigadier
-criminals
-coyotes
-casserole's
-annex
-cereals
-breadboxes
-belgian
-conductivity
-counterexample
-anarchist
-couches
-atavistic
-clipped
-button
-axiomatic
-capping
-correcting
-chase
-chastise
-angle
-burnished
-beauteously
-antipodes
-crippling
-crowns
-amends
-bah
-brigadiers
-alleged
-correctives
-bristles
-buzzards
-barbs
-bagel
-adaptation
-caliber
-browner
-apprehensions
-bonnet
-anachronistically
-composites
-bothered
-assurer
-arc
-chaser
-bastards
-calmed
-bunches
-apocalypse
-countably
-crowned
-contrivance
-boomerang's
-airplane's
-boarded
-consumption
-attuning
-blamed
-cooing
-annihilation
-abused
-absence
-coin
-coronaries
-applicatively
-binomial
-ablates
-banishes
-boating
-companions
-bilking
-captivate
-comment
-claimants
-admonish
-ameliorated
-bankruptcies
-author
-cheat
-chocolates
-botch
-averring
-beneath
-crudely
-creeping
-acolytes
-ass's
-cheese's
-checksum's
-chillers
-bracelet
-archenemy
-assistantship
-baroque
-butterfly
-coolie's
-anecdote
-coring
-cleansing
-accreditation
-ceaselessly
-attitudes
-bag
-belong
-assented
-aped
-constrains
-balalaikas
-consent
-carpeting
-conspiracy
-allude
-contradictory
-adverb's
-constitutive
-arterial
-admirable
-begot
-affectation
-antiquate
-attribution
-competition's
-bovine
-commodores
-alerters
-abatements
-corks
-battlements
-cave
-buoys
-credible
-bowdlerizes
-connector
-amorphously
-boredom
-bashing
-creams
-arthropods
-amalgamated
-ballets
-chafe
-autograph
-age
-aid
-colleague's
-atrocious
-carbonizing
-chutes
-barbecued
-circuits
-bandages
-corporations
-beehive
-bandwagon
-accommodated
-councillor's
-belted
-airdrop
-confrontations
-chieftain's
-canonicalization
-amyl
-abjectness
-choke
-consider
-adjuster
-crossover's
-agreeing
-consolations
-capitalizers
-binges
-annihilating
-callers
-coordinate
-banshees
-biscuits
-absorbency
-corollary
-corresponded
-aristocrat's
-banally
-cruiser
-bathtub's
-abbreviated
-balkiness
-crew
-acidulous
-air
-birdies
-canvassing
-concretion
-blackjacks
-controller's
-aquarius
-charm
-clip
-awarder
-consistently
-calibrated
-bushwhacking
-avaricious
-ceaselessness
-basically
-accolades
-adduction
-commending
-consulates
-certifiable
-admire
-bankers
-appropriateness
-bandlimits
-chill
-adds
-constable
-chirping
-cologne
-cowardice
-baklava
-amusedly
-blackberry
-crises
-bedeviling
-botching
-backbend
-attaining
-continuity
-artistry
-beginner
-cleaner's
-adores
-commemorating
-amusement
-burial
-bungalow's
-abstinence
-contractually
-advancement's
-conjecture
-buckling
-conferrer
-cherub's
-belonged
-classifications
-baseball
-carbonation
-craved
-bans
-aphid
-arbor
-ague
-acropolis
-applied
-aspired
-calibrating
-abundance
-appeased
-chanted
-ascent
-convenes
-beep
-bottles
-aborigines
-clips
-acquainting
-aiming
-creditor's
-abolitionists
-cloves
-containments
-bungling
-bunt
-anchors
-brazed
-communicator's
-brew
-accumulate
-addicting
-actively
-befog
-anachronisms
-bumblers
-closest
-calculators
-absurdity
-colleagues
-college
-assesses
-conflicted
-associational
-betide
-conceptualization
-adjutant
-alliances
-corresponding
-barometers
-cot
-brooch's
-coiled
-arboreal
-convicted
-artless
-certificates
-bourbon
-astonish
-bust
-correlate
-amounts
-anal
-abstraction's
-corns
-conqueror's
-boldly
-bob's
-beer
-blanks
-corpses
-contingent
-blackly
-backed
-appearances
-cancers
-actuating
-apprehension's
-colorings
-anglicanism
-armament
-armer
-bizarre
-begotten
-actions
-archly
-capriciously
-clue
-contractor
-contributions
-agendas
-coached
-blamable
-annoyers
-coupons
-brooked
-assortment
-axes
-celebrates
-courageously
-baroqueness
-blasphemous
-asserter
-contents
-correctly
-challenged
-bulldoze
-casement
-acknowledge
-bitterness
-belongs
-allotments
-chalice's
-bequest's
-adjacent
-consumer's
-conservatively
-coalition
-background's
-backache
-befouls
-brushfire's
-analysts
-branch
-airways
-awaiting
-breakfast
-anoints
-baying
-contrary
-bilge
-chasm's
-babes
-afresh
-centerpiece's
-barked
-coffin
-assumed
-actresses
-accentuating
-aching
-abet
-balancers
-consumptively
-cagers
-backing
-angiography
-chord's
-cheapened
-bewailed
-arson
-begged
-convergent
-bowlers
-conflicting
-confiscated
-bitch
-bloody
-brushfires
-bleach
-computation's
-choppers
-circuitously
-chancing
-bunker
-concept's
-alacrity
-boyhood
-ammo
-bobwhites
-carter
-ardent
-bier
-airway's
-brownies
-aura
-cannibalizing
-confirms
-australian
-barrage
-closures
-assertive
-abstainer
-bicarbonate
-clone
-back
-cipher
-crown
-cannibalizes
-away
-crafty
-airings
-amtrak
-comical
-burnish
-continuum
-apparition
-apologizing
-blot
-blacker
-characters
-built
-apparent
-applicative
-assiduous
-attorneys
-affectionately
-bobbing
-baggy
-comic's
-attempt
-appealers
-amortize
-bonanza
-backwards
-bowers
-anemometer
-ambulance's
-creeps
-abduction's
-coal
-chiller
-adjudications
-clogging
-ascending
-bookkeeper
-crawlers
-battery's
-artifacts
-attributions
-amusements
-aftermost
-allophones
-bemoaned
-comptroller
-bugger's
-buoyancy
-booboo
-award
-amplifying
-certify
-bivariate
-attunes
-asteroidal
-chant
-collectively
-chasteness
-chapels
-copiousness
-benign
-armies
-competing
-buss
-awakened
-breakpoint's
-conceptualizing
-cleansers
-acorns
-conveyance's
-bluer
-battle
-budges
-characteristically
-be
-contour
-beguiling
-awarding
-armhole
-airship's
-bathtub
-breathable
-crowded
-compiles
-certain
-brutalizing
-bacteria
-baronies
-abode
-blacksmith
-brinkmanship
-capitalizations
-cousin
-botany
-avionic
-companion
-consists
-connoisseur's
-avalanched
-claimant's
-backstitches
-affixes
-bikes
-atomically
-cowed
-asleep
-becomingly
-acorn's
-complainers
-appreciated
-cross
-cringed
-booting
-attitudinal
-broadcasting
-childishly
-breeze's
-craven
-boll
-clause's
-burden
-appendages
-atemporal
-allah
-carnival's
-anchorage
-adjures
-besought
-abounding
-crucifying
-arrangements
-antiquarians
-burrows
-antipode
-canvas
-constable's
-coopers
-ascended
-companionship
-bakery's
-bayonets
-conclusively
-boasters
-beneficiaries
-conspicuous
-contriver
-architecture
-breakthroughs
-brownie's
-blur
-academics
-antagonist
-contemplates
-arena
-caravan's
-administers
-comprehensively
-convey
-bigot
-blitz
-bibliography's
-coerced
-assail
-amazons
-banned
-alabaster
-concluding
-bouquet
-barks
-acquaintances
-astonishment
-constraint
-backpack's
-breakthroughes
-blocking
-accomplishers
-catastrophe
-bushels
-algae
-ailment's
-anemometers
-beginning's
-chefs
-converse
-cornerstone
-astound
-assuring
-adornment
-anyone
-alumni
-club
-bestselling
-businessmen
-constructed
-attendee's
-cooped
-ablute
-chronicler
-alaska
-clam
-canonicals
-concerned
-aligned
-creek
-burrow
-allay
-admirals
-blackens
-compressing
-confirm
-cows
-battleship's
-belched
-affixing
-chalices
-choirs
-absentee's
-baseboard's
-apportionment
-adheres
-accounts
-chef
-access
-clearings
-accompanists
-concentrating
-ado
-bathos
-bailiff
-continuance
-ball
-bearer
-congress
-cites
-can't
-balloon
-crams
-consults
-bungled
-bike's
-apes
-assassinations
-colt's
-consecrate
-ancients
-chick
-analyst
-adsorbing
-burntly
-accompanist's
-apprehensive
-bengal
-boughs
-ankles
-anchored
-benefits
-accommodation
-amiss
-brink
-chewers
-blueberry's
-chairs
-adjoin
-bivalve
-autobiography's
-automated
-comparisons
-climbed
-artists
-congruent
-cold
-atonement
-cashier
-armageddon
-allocations
-bereavements
-bumblebees
-blew
-busboys
-bottoming
-alternations
-apprenticed
-bestial
-cinder's
-consumption's
-abbey's
-amended
-continued
-birefringent
-barbados
-ability's
-compulsory
-antler
-centerpieces
-accountant's
-arrogant
-ballads
-ascenders
-appliers
-adjustment's
-blabbed
-baits
-activity's
-clod's
-adjudicating
-bleak
-commutes
-bumming
-beating
-cohesiveness
-branded
-acknowledger
-communications
-blockhouses
-booklets
-consenters
-creek's
-consulting
-binary
-coaster
-ascription
-bushwhack
-boggles
-affidavit's
-arrangement's
-congressionally
-convenient
-avoider
-abaft
-bootlegger's
-befriending
-ceases
-carbonizes
-clumps
-commented
-competence
-conversing
-butting
-astonishing
-armful
-allegory's
-crisis
-critiques
-concurred
-conservative
-aristotelian
-blizzard's
-corner
-amateur's
-compare
-affiliations
-bestseller
-batch
-cleanly
-assayed
-bravos
-bowls
-conceptualized
-babe's
-algorithm's
-baptist
-cheeks
-conquerer
-bidder's
-behaving
-briefcase's
-analogues
-amply
-attitude's
-apple
-crossable
-ambushed
-besmirches
-creditors
-bandwagons
-continentally
-adjuncts
-concerns
-agers
-cop
-amoebas
-bisected
-bombing
-appendices
-cocking
-bused
-babied
-compounds
-asserts
-believably
-alert
-apostate
-catalysts
-aureomycin
-convex
-beetle's
-banishing
-agitating
-bystanders
-bow
-connotes
-blanch
-charmingly
-animal's
-baritones
-brier
-astronomer
-company's
-balding
-actually
-aunt's
-avalanches
-acquisition
-base
-compilations
-bathtubs
-actualization
-chanced
-atom
-banged
-befuddled
-apologized
-componentwise
-britisher
-began
-conservationist
-actuate
-crosser
-appended
-bitten
-ambivalence
-acetate
-conversions
-buzzwords
-askance
-abolishing
-birdied
-creeds
-anglers
-colossal
-bereft
-chock
-apprentice
-cooper
-besmirching
-allocating
-antiques
-bikini's
-bonders
-afflictive
-augmentation
-atheist
-bucket
-bibliophile
-annexes
-beguiles
-birdbaths
-amendments
-animators
-asymptotically
-communally
-barber
-biographers
-arguable
-confidant
-apologies
-adorns
-contacting
-coarsest
-artichokes
-arraign
-absorbing
-alden
-commercially
-cabbage's
-coincides
-clumping
-cents
-alleviater
-buzzard
-braked
-anesthetized
-bugling
-capitalist
-befriended
-appreciatively
-boomtown's
-cozier
-critic's
-correspondent
-bard
-attenuator
-bake
-brings
-chews
-anechoic
-brutal
-colder
-buckshot
-canvassers
-analytic
-allies
-alloys
-awake
-alienates
-bin's
-crimes
-constructible
-classifiers
-bulb
-cream
-banquet
-axiomatize
-adjourn
-converted
-auditioned
-comfortably
-bandwidth
-cannibalize
-ascensions
-bussing
-balloons
-contenders
-commemoration
-aspersions
-consultation
-cashes
-belting
-augurs
-architectural
-bluebird's
-breastworks
-absconded
-bullets
-bloodstain's
-blunder
-astronautics
-coo
-approves
-authority
-assure
-amsterdam
-acquitted
-adversity
-celebrate
-bred
-bridged
-bloc's
-bullied
-affinity
-breezes
-baptistry's
-constitutions
-avouch
-amazingly
-consolation
-abnormality
-clashes
-buttes
-buzzard's
-breathers
-chipmunk
-contented
-carol's
-armers
-amazedly
-comprehends
-canonicalize
-breakthrough
-arbitrator
-butterfat
-cases
-besiegers
-affianced
-amelia
-bush
-airplane
-annulled
-bike
-alternated
-attackers
-convene
-aficionado
-anachronism's
-crude
-carelessness
-akin
-combated
-assisting
-clocker
-attacked
-briefed
-antic's
-attendants
-attracting
-cope
-allotting
-bandwidths
-add
-assaulting
-breakage
-climes
-arrival's
-burp
-accelerator
-capacitance
-arabians
-bankruptcy's
-archeological
-coins
-browbeating
-chasm
-cardinalities
-compartmentalize
-courter
-assess
-abreaction
-brakes
-compatibly
-compression
-characterizable
-briefing's
-alto's
-classifiable
-contrast
-correlation
-colonial
-applying
-authorizers
-contesters
-basely
-cherries
-clicking
-cornfield's
-alarmingly
-conferences
-business's
-banker
-bloomed
-airfield
-attracts
-building
-commutative
-atomization
-competitions
-boatsmen
-acquirable
-arkansas
-command
-beings
-compactors
-anodize
-arguments
-conforming
-adsorption
-accustomed
-blends
-bowstring's
-blackout
-appender
-buggy
-bricklaying
-chart
-calmer
-cage
-attractive
-causation's
-athenian
-advise
-cranks
-containers
-besotter
-beret
-attender
-cone
-bills
-aligns
-brushlike
-brownest
-bosom's
-berth
-accountably
-bequeathed
-affirmatively
-boundless
-alleyways
-commute
-bendable
-abhors
-calculation
-affidavit
-answerable
-bellicose
-counterfeiting
-admiral's
-chisel
-bridesmaids
-believers
-aggregated
-conspicuously
-abased
-armenian
-conspirator
-canonical
-assignable
-barrage's
-clearance's
-casts
-administratively
-befoul
-chaffer
-amazer
-colorer
-broaching
-crevice
-aniline
-coursing
-compassionate
-adhesive
-bibliographies
-corrects
-augments
-between
-causer
-amorist
-cellist's
-acoustical
-baseless
-cigarettes
-astuteness
-appropriators
-convincing
-bellhop's
-bemoaning
-calmingly
-chronologically
-castles
-algebraically
-appointees
-academic
-blunderings
-assassins
-barrel
-accuracy
-amortized
-ballpark
-acrobat's
-brazier's
-abortively
-coarser
-airfields
-contester
-circus's
-creased
-amorphous
-accomplisher
-blabs
-butchers
-crackles
-bachelor
-aviators
-chariot's
-circumflex
-binocular
-alienating
-artificially
-agreement's
-aglow
-afghan
-abrupt
-annihilates
-apologetic
-barge
-betters
-algorithms
-conjurer
-chargeable
-brindle
-alphabetizes
-coder
-availing
-bandpass
-arrogance
-convent's
-advertiser
-connected
-basso
-breakfaster
-comic
-congenial
-beau
-courters
-adapters
-abruptly
-chemicals
-bringed
-creaming
-butterer
-attained
-actuals
-averred
-brainwash
-centerpiece
-blabbermouth
-byproduct's
-adaptable
-automata
-art
-cheery
-beheld
-beehive's
-claimed
-crucial
-brokenness
-agility
-combating
-cleft
-amenity
-after
-configuration
-contrasting
-coarsely
-brass
-barnstormed
-bowel
-bridesmaid's
-cornfield
-crazing
-autocracies
-adult
-conceptualizations
-corroboration
-bedders
-arroyo
-alarmist
-boatman
-chests
-burglary
-budgets
-canary's
-arraigning
-chin
-barnstorms
-blamers
-brimful
-calculate
-cellular
-contended
-challenges
-brusque
-bikinis
-arithmetics
-chairpersons
-class
-aircraft
-capably
-centralize
-awhile
-compacting
-courteous
-archaeologist's
-cram
-adagio
-affronts
-amplitude's
-bureau's
-audaciously
-autism
-blueberries
-an
-chips
-confiner
-chopper's
-chronology
-breaching
-bead
-amass
-camouflage
-compensation
-aspect
-broker
-atrophy
-balk
-bloodless
-barnyard
-benefactor's
-airdrops
-caused
-anthem
-activist's
-bottomless
-arrogates
-avoided
-bouncy
-clarified
-articulate
-almoner
-communists
-blokes
-butternut
-clockings
-barium
-blows
-criticism's
-associations
-brute
-bleeds
-alliteration's
-bluestocking
-boxwood
-clearer
-allegiance
-conceptualizes
-captivating
-bolshevik's
-belabored
-biographic
-contaminates
-chanticleer's
-adjusted
-childhood
-arguing
-cape
-conversantly
-compensating
-collaborations
-arraignment's
-blasted
-charging
-aggregation
-apprentices
-bird
-codifiers
-ballistic
-breve
-bells
-carolina
-chalk
-buckles
-boyfriend's
-adorn
-accoutrements
-availability
-antisymmetry
-blades
-alluded
-asterisks
-bookcases
-additive
-consents
-advanced
-balalaika
-coders
-caliph
-alundum
-are
-controllable
-blazing
-clattered
-asiatic
-axiomatizes
-ace
-coining
-column
-auditor's
-carol
-concatenated
-arrayed
-capital
-cautioner
-clan
-beauteous
-abbreviate
-asteroids
-canal's
-consolidation
-closets
-concealer
-crevices
-abed
-complex
-conviction's
-abide
-arrests
-begrudges
-adolescent
-conceals
-cells
-circles
-bravest
-compromiser
-bagels
-areas
-afore
-allergies
-arrangement
-attraction's
-amulets
-abstraction
-captured
-crouched
-brothers
-cash
-achieving
-bastard
-compete
-boiling
-beaching
-amphetamines
-clerking
-congestion
-alleviates
-angry
-bared
-comprehended
-bloodstain
-constituency's
-automating
-aerial's
-counterfeit
-besotted
-basses
-biofeedback
-compilation's
-band
-consulate
-appellant
-cough
-antennae
-contend
-anniversary
-boor
-artifactually
-aerobics
-booths
-chubbiest
-consumable
-assignments
-bromide's
-confined
-breakers
-alongside
-courtier
-boisterously
-bilaterally
-alternation
-auspiciously
-arbitrated
-condemning
-burns
-correspondents
-composition
-cavalierly
-coverlets
-capacities
-clatter
-apotheoses
-cartography
-ceased
-capitalized
-auditor
-appendicitis
-chops
-barony
-anemometry
-befouled
-briefer
-chest
-begetting
-bloats
-bookseller's
-commitment
-confides
-carcass's
-battering
-altruistically
-ballots
-adornments
-broaden
-angularly
-coefficient
-cataloged
-brae
-advantage
-anthems
-calculated
-counseling
-agitate
-accentuated
-camel
-ambivalent
-bedposts
-beacons
-chubbier
-cheerer
-assumes
-concord
-autumns
-convention's
-alpha
-adulterates
-arbiters
-archaically
-criteria
-achilles
-cheaper
-bulling
-associators
-bloater
-brawler
-ability
-adherents
-commonwealth
-coyote's
-centrally
-bequeathing
-abandonment
-circumstantially
-courteously
-borrow
-countermeasure's
-capricious
-allied
-anagram's
-absorptive
-assuage
-asset
-booked
-aspects
-commits
-crates
-capacitive
-condones
-assimilates
-carriage
-competitor's
-cocoons
-aggravated
-caravans
-arbitrator's
-baked
-balanced
-annihilated
-addressable
-autonomous
-bandwagon's
-contesting
-burrowing
-coroutines
-abjection
-correctable
-applauded
-bragged
-code
-aggressiveness
-cluttered
-attacking
-chide
-am
-coasters
-blizzard
-contentment
-altruism
-certifier
-capturing
-combinators
-carefree
-activate
-blindfolding
-assassinating
-approximate
-biplane's
-aplenty
-arteriosclerosis
-concentrates
-antisymmetric
-assurances
-anarchist's
-ascend
-advancing
-atrocities
-butt's
-bearable
-craftiness
-categorized
-barn
-contributor's
-arises
-bushy
-bisque
-coasted
-bargaining
-area's
-couples
-cabs
-barter
-bulletin
-chisels
-broadcasters
-contingency
-bywords
-antimicrobial
-coexisted
-blinding
-arithmetize
-coweringly
-convince
-competed
-bauble's
-crab
-boggling
-advocacy
-atlas
-assembled
-ancient
-bloodstream
-balking
-bin
-bully
-affirm
-cruelest
-atone
-conserved
-confession's
-bat
-captive
-aster
-blames
-colonel's
-bones
-borderline
-cleanses
-classified
-crudest
-contiguity
-bailing
-ablaze
-bender
-attendee
-clobbers
-aliasing
-autopilot
-coolers
-cache
-allayed
-barnyards
-britons
-appointment
-adaptor
-blockers
-abridges
-bloodiest
-betrothal
-bombards
-bony
-bus
-canary
-antinomy
-awash
-comrades
-ablating
-collectible
-boats
-brand
-church
-bandy
-adhering
-barred
-ammunition
-chime
-accompaniment's
-battleground's
-composing
-caveats
-armor
-amoeba
-composure
-collides
-avowed
-banding
-counsels
-asymmetric
-abbreviates
-balky
-adjudicates
-anointing
-accursed
-copse
-action
-construction's
-accents
-ambition's
-caressing
-cosmetic
-accession
-clutters
-censures
-allusions
-belittled
-armchair
-abode's
-conception's
-ascribe
-aliases
-ancestry
-ax
-companionable
-aright
-boxed
-brighteners
-alloy's
-checkable
-arraignments
-bed
-bunkhouses
-abbeys
-ceasing
-companies
-cherishing
-chunk's
-barony's
-chinning
-burdens
-briskness
-beggarly
-beloved
-clambered
-constitutionality
-beguiled
-archers
-alleyway
-apostle's
-consulate's
-antiformant
-categories
-construct
-aliments
-acquired
-blotted
-alterations
-adolescent's
-cranes
-bluntest
-accusation
-chafer
-airstrips
-abolished
-bothersome
-churchly
-airy
-bedded
-awareness
-alliterative
-arose
-amputates
-civilization's
-arenas
-certifying
-aspirators
-carbon's
-bunching
-aerates
-bilked
-checking
-cloned
-administrations
-canvasses
-colorless
-chamber
-circumspectly
-benedictine
-advisedly
-classifier
-approachable
-banners
-concurrently
-chores
-agape
-convention
-bindings
-budget
-comedies
-ants
-ambassadors
-chroniclers
-carrots
-colorful
-bulkhead's
-coherence
-buyer
-aggressions
-congressional
-commoners
-cheapen
-concealed
-columnates
-anarchy
-actress's
-baseboards
-creature's
-centuries
-barbarian
-concrete
-bicycles
-acceptably
-acclimating
-biceps
-bloodhound's
-becalmed
-apostle
-bible
-conjunctive
-comb
-ballers
-bickering
-adulterous
-austrian
-applicable
-blackberries
-creasing
-catalogs
-avert
-asparagus
-cambridge
-bird's
-belgians
-admonished
-admirations
-conscientious
-crescent's
-connectives
-blissful
-commenting
-bagged
-assimilate
-abounded
-copyright's
-advancement
-axiom's
-compilation
-circumlocution's
-catheter
-chances
-concretely
-codification
-browned
-clustering
-bum's
-clauses
-boundlessness
-arteriole's
-alfresco
-begrudged
-blustered
-anglican
-adjoined
-bamboo
-bathed
-consortium
-carrot's
-cloak
-album
-bunglers
-approbate
-colored
-aim
-cowboy
-alienate
-cleverest
-ambiguous
-confrontation's
-clear
-africa
-bowline's
-astronauts
-belayed
-censorship
-animation
-bedrooms
-chasms
-compared
-cogitated
-barbarians
-accomplices
-columnizes
-beaming
-busied
-counterpointing
-aluminum
-coconut's
-acclamation
-chokers
-biomedicine
-basalt
-buckwheat
-cardinality's
-bafflers
-arid
-chap's
-abound
-biblical
-backbone
-anticipation
-condemner
-angular
-advisability
-believing
-boiler
-arclike
-abetter
-bespeaks
-axiomatically
-coarse
-auditions
-bludgeoning
-clam's
-chief
-arrow
-cementing
-anxiety
-aberrations
-brushes
-cherub
-corollary's
-bunters
-beefers
-barbiturate
-circumlocution
-conjoined
-charities
-coverage
-campaigner
-burrowed
-barracks
-bristling
-accomplice
-abandoned
-bull
-caked
-century's
-bantu
-bristled
-airer
-bench
-bevy
-chamberlain's
-attention
-cloning
-camouflaging
-alder
-counter
-credibly
-approvingly
-breakup
-artillery
-celestially
-bail
-baker
-bullish
-canvass
-conversationally
-bringers
-augment
-creditably
-butterers
-botswana
-contemptible
-bribing
-adumbrate
-barb
-calico
-alludes
-amplified
-chills
-cloak's
-aver
-arthropod's
-budgeter
-bereavement
-cellars
-crewing
-blackmailer
-ayes
-bedsteads
-breachers
-bazaar
-centered
-celebrity
-blameless
-abscissa
-aerators
-awaited
-british
-adversary
-cowslip
-buttons
-confusing
-buggy's
-belts
-canceled
-addresses
-bribes
-condoning
-bonneted
-coarsen
-amazement
-angels
-chemise
-carbonates
-apostolic
-bandit's
-contending
-consummate
-counterclockwise
-beneficence
-benefitted
-contradicts
-comfortabilities
-anemone
-conductive
-articles
-bookcase
-burst
-baptizes
-countless
-costs
-agonizes
-byte
-creeper
-begs
-bunnies
-attract
-able
-calories
-baskets
-american
-brunt
-cognition
-closing
-chef's
-backbone's
-complicates
-cloister
-bedsprings
-arrays
-brigs
-archbishop
-buckler
-clove
-catholic's
-bellboys
-chairmen
-clap
-clarifications
-ambuscade
-bight
-bellyfull
-allowance's
-academy's
-acquiescence
-ambush
-catches
-at
-billion
-contact
-bees
-adopters
-approximately
-chiseled
-attributively
-criers
-codification's
-cowslips
-contradictions
-buttock's
-categorically
-counterpart's
-confessor
-appreciably
-adjusts
-altitude
-construe
-cancer
-bay
-aristocratic
-alleviaters
-binoculars
-axiomatizing
-changer
-bustle
-civic
-bostonians
-crops
-authorizations
-cogitation
-baptize
-caressed
-abase
-ariser
-axiomatization
-aggravates
-confiscation
-bowdlerize
-backspaced
-alters
-clarity
-blots
-bland
-belligerent's
-burgher
-cardinally
-bookcase's
-buggers
-byte's
-avarice
-crowding
-beriberi
-allegories
-coronets
-cell
-calculative
-adduce
-amperes
-bladders
-adages
-contests
-cognizant
-actuates
-ambiguity
-brighten
-concert
-conviction
-booty
-ashtray
-braves
-blouses
-avoiders
-confederate
-bombings
-couplings
-convictions
-attractiveness
-chronicled
-corers
-anger
-covertly
-aural
-asynchrony
-arrowheads
-breakdown's
-bulletins
-ceremonialness
-clipper
-bracelets
-anthropomorphically
-benedict
-connecting
-bacterium
-achievers
-abutter's
-autocorrelate
-coupling
-blanketer
-continental
-assignment
-conundrum
-arab
-besides
-cheerful
-blowup
-bastion
-arrive
-combines
-agar
-cookie
-astronaut's
-constraint's
-article's
-confiscations
-bounded
-adjudicate
-belligerently
-boron
-brownness
-adept
-creep
-abduction
-accosting
-asylum
-autographed
-clash
-chiseler
-clumsily
-capitally
-braking
-absenting
-bagatelle's
-comet
-basked
-anything
-buffeted
-absentia
-bounty
-carols
-characteristic's
-constructive
-comforting
-aflame
-brainwashed
-booby
-aspirations
-adjudge
-behaviorism
-computability
-assessment
-consultations
-bowstring
-acknowledgment
-arranger
-chancellor
-attest
-compresses
-concessions
-asymmetrically
-administering
-clamoring
-arraigned
-archived
-admonition
-actor's
-aimers
-colorers
-booklet
-calibers
-affix
-bushel's
-atomizes
-creeks
-bleedings
-casuals
-archives
-certainly
-animate
-cons
-affiliate
-answered
-coyote
-coughed
-alligator's
-antagonized
-arousal
-assisted
-aerated
-competently
-conquering
-acclaimed
-assign
-announcer
-controllers
-amalgamation
-comfort
-antihistorical
-availed
-balsa
-annoyed
-basted
-asymptomatically
-cropped
-combinational
-barging
-conversant
-causality
-botches
-bedspread
-considerately
-bookstores
-climate
-blessing
-accordion's
-cdr
-bonanza's
-construing
-bearings
-bluster
-backspaces
-babyish
-countermeasure
-crime
-battered
-audit
-associating
-corps
-application
-archangel's
-aided
-breasted
-compelled
-acrobats
-breakfasts
-chronologies
-beet's
-averts
-convergence
-attributable
-adverbial
-churns
-arrest
-breastwork
-beefs
-brownie
-create
-contradistinctions
-coordinators
-abandoning
-byline
-beatitude
-autosuggestibility
-bipartite
-annals
-assents
-conceives
-amalgams
-cleft's
-clicked
-appointers
-bible's
-boots
-caret
-attaches
-controversy's
-combinatorial
-bazaars
-cardinals
-bored
-catering
-christian's
-ashman
-consequence's
-austere
-clay
-birthday's
-amongst
-arbitrariness
-brainstorms
-chateaus
-coaxer
-applause
-cautiousness
-adorned
-compromises
-creatures
-compliance
-apartheid
-archiving
-amoeba's
-communal
-comedian's
-aggressive
-crop
-ante
-better
-chalice
-aristocrats
-circling
-belittle
-abortion's
-coldly
-certification
-befriends
-courthouse
-anesthesia
-accorder
-athletic
-blithe
-bedder
-abasements
-councils
-beware
-abductor
-assonant
-clench
-aspersion
-abortion
-abating
-birches
-breakpoints
-acyclic
-ablate
-canners
-cistern
-boxtop
-composite
-cloudless
-computation
-chastely
-abusing
-bunker's
-compounding
-alveolar
-chaplains
-bias
-audiological
-capability's
-bangle
-barren
-antidote's
-cranking
-baptizing
-bond
-borders
-automobile's
-allegoric
-chargers
-baltic
-autumn
-columns
-absolute
-connoisseur
-cranberry
-contiguous
-consoled
-confirmations
-argot
-blouse
-annotated
-callous
-astounded
-crashed
-autonavigators
-chivalry
-columnating
-beefed
-convincer
-allegorical
-bagger
-assume
-containable
-artistically
-calibration
-architectonic
-campaigns
-addressability
-crazier
-buy
-brightener
-bastion's
-blurb
-awaits
-commands
-chocolate
-bleaching
-antenna
-blowers
-chorused
-composers
-assigners
-aspires
-coils
-bid
-application's
-clamped
-bedding
-awkwardly
-coppers
-costumes
-borax
-caged
-candler
-badges
-clutches
-consign
-apprised
-buys
-adiabatically
-aggregately
-canned
-abstract
-acrimony
-coax
-analytically
-absurd
-alluring
-contradicted
-aspersion's
-bribe
-boos
-chattererz
-backache's
-complying
-continent
-cohabitate
-causation
-astronomer's
-cities
-bookie
-bleating
-cracking
-bicameral
-convoluted
-adjustable
-ambulance
-can
-boulders
-consideration
-announces
-briars
-antipode's
-bartered
-ancestor
-biplanes
-characterize
-crested
-bum
-bridling
-consolable
-bungles
-coffee
-buffets
-congratulation
-commitment's
-adequately
-clown
-capacitor's
-broomsticks
-agglutinate
-activations
-asians
-canon's
-authenticity
-complexities
-cripple
-bracket
-counselor's
-beatably
-bounced
-baton's
-crankiest
-barbell's
-caster
-casseroles
-ballad's
-bob
-batched
-attenuated
-beakers
-biologist
-bleary
-condescend
-blondes
-augustness
-boldface
-battlefronts
-acumen
-bolting
-articulatory
-butyrate
-bowel's
-backwater's
-colonel
-creating
-authorized
-bijection
-accruing
-admirably
-correctness
-citadels
-clasps
-bandlimit
-bib
-appalachia
-contrives
-bundle
-audiology
-circumventing
-blinker
-choked
-bilks
-clears
-affirmations
-arbitrating
-bites
-bootstraps
-capitals
-commuters
-billeted
-authentication
-choice
-attentively
-aggressor
-arterioles
-crowds
-chestnut
-backstitched
-attachments
-assimilating
-bewilderment
-atrophied
-chintz
-blackjack
-armadillos
-bonfire's
-ballast
-agonies
-busier
-coefficient's
-adventurous
-ballet's
-coil
-chewed
-come
-bonder
-catalogue
-coursed
-arise
-biennium
-ceremony's
-blanching
-appraisers
-acolyte
-argues
-beholden
-appanage
-astatine
-banana's
-coons
-civilians
-bodyguard
-archipelago
-bug's
-candles
-antique's
-accidently
-blighted
-belgium
-besieged
-burned
-abuse
-asian
-chute
-awkwardness
-abasing
-bottler
-ardently
-blab
-breakwater
-cavity
-cheated
-befall
-according
-chronicle
-airframes
-bats
-choring
-authorize
-consumed
-chatter
-annunciated
-capers
-anomalous
-clustered
-burner
-acquaintance's
-badger's
-basic
-affectations
-buzzy
-coast
-attendances
-activating
-beams
-cohesive
-attainable
-barbecueing
-beautiful
-acronyms
-communion
-client
-atypical
-antagonists
-conservations
-arguers
-agglomerate
-antigen
-battalion
-ambition
-countered
-assistant
-classed
-arming
-alveoli
-buff's
-backplanes
-busted
-bermuda
-converting
-brutish
-boot
-acidities
-confrontation
-chapel's
-berlin
-ascender
-behead
-buddy's
-commandment
-actuated
-brilliancy
-chance
-bedrock's
-bridgeheads
-arable
-avid
-arteries
-caresser
-ballyhoo
-attested
-african
-comradely
-consciences
-commencing
-antennas
-annulments
-bobolink's
-advisee
-acceptance
-crack
-ascendent
-appendage's
-accommodates
-accumulated
-clones
-apocryphal
-ages
-cluster
-capitols
-camper
-beading
-amble
-buffeting
-circumspect
-advances
-analyzes
-courier's
-aperiodic
-appealer
-atonally
-attentive
-conspire
-appropriating
-armed
-allergic
-agglomeration
-consternation
-blinks
-audibly
-aspirins
-bunions
-adverbs
-armload
-bet's
-caring
-carryover
-coordinator's
-afterthoughts
-allays
-abided
-brownish
-baiting
-capitalism
-coined
-conspirators
-automatic
-contradistinction
-conductor's
-backstitching
-conjure
-casings
-accountant
-clinched
-constrain
-alcohol
-bee
-anticompetitive
-britain
-bade
-camera's
-antimony
-activated
-burglarizes
-compatible
-cotyledon's
-artificiality
-bath
-citadel
-archivist
-chandelier
-addiction
-ampersand
-bitterer
-constructively
-afield
-bing
-attractor's
-cringe
-allergy's
-bigots
-assimilation
-ate
-capitalization
-abridge
-buzzword
-befit
-bandlimited
-commandant
-alabama
-acculturated
-brightening
-bulldozing
-cooky
-bunks
-centers
-bespectacled
-adherent's
-abducts
-another's
-condensation
-billeting
-bye
-chess
-craziest
-ballgown's
-archaism
-consorted
-chinned
-cowl
-beat
-bootlegger
-bravado
-classically
-bulging
-browbeat
-accommodate
-borne
-bronzed
-artifice
-arcade
-become
-backlog
-addressers
-amphitheaters
-befogging
-crochet
-aiding
-celebrated
-conversational
-backbends
-authentications
-advertisement's
-blockade
-bulldozes
-contraction's
-bricklayer's
-brain
-conveying
-anemia
-chronology's
-channeling
-caution
-commanding
-crosses
-artisan
-conditions
-admired
-authenticator
-airships
-blunter
-bridesmaid
-counseled
-cheeriness
-chiefs
-boils
-clerical
-atrocity's
-balls
-ambled
-canvases
-consoles
-abscessed
-abetting
-blitzkrieg
-bottlers
-beveled
-condemn
-alumna
-cords
-admittance
-annotates
-citing
-corrector
-appreciative
-branching
-betrays
-buttoned
-ailment
-boulevards
-bottlenecks
-chamberlains
-bedbug
-covenant's
-crispness
-considering
-broadcasts
-audubon
-arousing
-correction
-barrack
-closure
-contrastingly
-brittleness
-assassin's
-bursa
-bungalows
-balked
-conceptual
-carcasses
-arabia
-blueprint's
-affectingly
-consorting
-buses
-auger
-appointed
-brute's
-bosoms
-anyway
-arrowed
-anaphorically
-clarify
-approachability
-assistance
-buzzes
-commonplace
-bluebonnet's
-adroitness
-availers
-aquifers
-architecture's
-action's
-backgrounds
-abduct
-attired
-briber
-admissibility
-cease
-beck
-auctioneers
-birdbath's
-atomic
-crossing
-considerate
-biconvex
-bulge
-bedridden
-arising
-aggression's
-cherish
-bureaucratic
-abater
-amputating
-atop
-climber
-clutched
-afford
-bisections
-bonnets
-commendations
-bloke
-abundant
-clamp
-aloes
-aboard
-atheistic
-advantageously
-buffs
-chimney's
-cheerily
-benefactor
-ample
-bushwhacked
-captain
-buckskins
-contextually
-antiquarian's
-browns
-bubble
-ban's
-brine
-acculturates
-anhydrously
-beaver's
-advantaged
-bibliographic
-clasping
-clattering
-coerce
-colorado
-airmen
-bandlimiting
-balks
-boners
-attached
-chosen
-convened
-bordello
-composer
-botanist
-backtracks
-civilization
-commutativity
-bloodshed
-cohere
-bunkhouse
-archdiocese
-boycotted
-crosswords
-bedspread's
-anteaters
-cove
-apothecary
-chute's
-addressee
-climatically
-blower
-bane
-cask's
-beetling
-ambiguities
-before
-abstain
-arachnids
-bucket's
-amateurs
-blackouts
-adverb
-butchery
-conjunction's
-barricade
-audiologists
-aphorism
-complete
-butts
-bishops
-allotment's
-confusingly
-channeller's
-blanches
-bragging
-bathe
-comedians
-celestial
-citizens
-couple
-backpack
-aphasic
-brothels
-axles
-cancellations
-bonus's
-consolidates
-authoritative
-axle's
-acclimatization
-carolinas
-chime's
-antibiotic
-bisons
-biographically
-achieve
-bleachers
-bicentennial
-behavioral
-accomplish
-concealment
-biddies
-antitoxins
-arriving
-apprehend
-affluent
-cliffs
-bleached
-astronomers
-connection
-bride
-backs
-bog's
-casket's
-continual
-ampere
-cat
-alternator
-cotton
-athletes
-communicant's
-best
-befuddling
-benefactors
-appease
-annoyingly
-context
-astonished
-cracked
-amnesty
-autumn's
-binder
-babying
-contributory
-assumption
-cowls
-cocks
-airless
-consummated
-atypically
-beneficially
-chairing
-accusative
-commanded
-bufferrer's
-alerter
-arbiter
-civilly
-charms
-backscattering
-cheater
-bushes
-caverns
-chieftain
-calf
-comparing
-aurora
-butyl
-cower
-bemoans
-baptistry
-carpenter's
-capes
-bordered
-arrows
-blocker
-crest
-appeal
-arabic
-conventions
-axis
-brains
-bookkeeper's
-circle
-cooks
-circumlocutions
-adventists
-barringer
-affording
-anatomically
-basements
-barbarities
-configuration's
-contributes
-collaborating
-beach
-comet's
-bakes
-assigns
-ballerina
-cheapens
-clinging
-conquered
-bisecting
-closenesses
-bugle
-boatmen
-beatings
-complicator
-bight's
-banister's
-archaic
-anthropologists
-clams
-beginners
-committee's
-communicants
-alone
-bounteously
-bastes
-ascertain
-alphabetical
-bringing
-batters
-amazon's
-constituent
-benders
-being
-constitutionally
-audiometric
-blast
-copings
-bailiffs
-colts
-coolies
-airlift's
-boomerang
-bifocal
-clothes
-cashiers
-congenially
-billows
-boilerplate
-biochemistry
-betting
-brimmed
-complementers
-breading
-bragger
-adducting
-bisectors
-abrogates
-criticized
-comrade
-bucolic
-birthright
-blurs
-challenger
-complicated
-bluebonnet
-biscuit's
-classmates
-campus's
-boundary
-bedbug's
-adjustor's
-acre
-bicycling
-awe
-additions
-baiter
-authorizes
-beautify
-copier
-buffet
-belfries
-acquisitions
-brooch
-crickets
-caterpillars
-beefsteak
-complicating
-bedpost
-criminal
-celebrity's
-bookseller
-christened
-coerces
-clamors
-all
-boatyard's
-canoe's
-begin
-anaerobic
-bushing
-agreers
-concedes
-countermeasures
-beg
-agglutinin
-bunted
-ammonium
-aspiration's
-bathrobes
-changeable
-beached
-bestowal
-beaner
-catsup
-admires
-clockwise
-agile
-alarms
-ached
-chinks
-buffer's
-cartesian
-annunciate
-chanticleer
-avenue
-anchor
-alliterations
-blanking
-bargained
-breathtaking
-crime's
-assiduity
-argentina
-contiguously
-aqua
-bested
-borderlands
-appetite
-captive's
-bipolar
-conceal
-counters
-costumed
-arrestingly
-bunting
-blight
-champagne
-brusquely
-address
-bloodhounds
-associative
-creed
-arithmetical
-balustrade's
-belabors
-complementing
-checkout
-archivers
-badlands
-behaviors
-ampoules
-bridgehead's
-antiquarian
-clumsiness
-considerable
-apportions
-anglicans
-appealingly
-barfly's
-absorptions
-awards
-congregates
-cloister's
-armour
-avoid
-correctively
-chucks
-burps
-bums
-berry
-batches
-administration
-atones
-bishop's
-blonde's
-casualty's
-cores
-bodied
-alter
-assonance
-apprise
-antitoxin
-avariciously
-checkpoint's
-affirmative
-conjures
-angstrom
-aesthetically
-canyon
-binge
-crazed
-breastwork's
-aids
-boston
-conceits
-announcement's
-beechen
-accessory
-authorities
-constrained
-automation
-anaplasmosis
-commander
-commendation's
-belabor
-cornfields
-artemis
-asphalt
-contracted
-brochure
-crafted
-allegedly
-alien's
-auditory
-blowfish
-adducible
-confederations
-annuals
-britches
-acquaintance
-appallingly
-abounds
-burglarproof
-crossers
-bayous
-brisk
-authority's
-covetousness
-averse
-accomplished
-aromatic
-admiral
-bijective
-avenging
-bran
-boatyards
-beseeching
-challenging
-bares
-acts
-abductions
-compendium
-compulsion's
-calendar's
-clad
-blockage
-conventional
-craze
-cajoling
-acceptability
-bungalow
-buff
-cramps
-attackable
-calculator's
-asp
-braved
-colors
-balling
-contaminate
-crackling
-comes
-complimenters
-across
-astronomy
-aborigine
-bobwhite's
-autopilot's
-chattered
-appall
-autonavigator
-bashed
-acoustics
-beachhead's
-apartments
-convenience
-blackout's
-bands
-autonomously
-amounters
-centripetal
-achievable
-astringency
-attuned
-concatenating
-copyright
-coding
-assumption's
-anastomoses
-confiscate
-asking
-beneficial
-adhesions
-busboy
-bronzes
-audacity
-bruises
-crash
-beau's
-circuit's
-aborts
-baubles
-beliefs
-assuaged
-costed
-blinking
-characterized
-bowled
-block
-conquests
-confesses
-amusers
-ceiling
-berets
-berliner
-abstentions
-child
-authoritatively
-closeness
-bushel
-considered
-communicates
-cheerlessly
-autofluorescence
-aquarium
-affects
-appurtenances
-airbag
-approaches
-admonishments
-bets
-bounden
-courtly
-bodybuilder's
-campus
-brainstorm
-americans
-chairperson's
-botanical
-askew
-amazon
-bleed
-clime's
-cooperations
-commonness
-boatloads
-blinked
-courtyard
-adapted
-aforethought
-backwater
-burr
-cathode
-awaking
-buzzed
-bridgeable
-arrives
-adventuring
-beseech
-attrition
-copied
-colon
-client's
-bandstand's
-advice
-baptistries
-antithetical
-alcohol's
-contradicting
-ambidextrous
-belches
-category
-bluntness
-coupon's
-assimilations
-comfortable
-caller
-affliction's
-attends
-compactest
-baler
-beacon
-blind
-bleakness
-beseeches
-courts
-couch
-consequential
-adulterers
-craving
-biggest
-astray
-bigoted
-barfly
-charges
-ambiguity's
-commentary
-crankily
-cowerer
-carnival
-bachelor's
-bituminous
-continuance's
-calamities
-claws
-apiece
-century
-ascendancy
-charts
-animations
-aggression
-chickadee's
-carve
-confidence
-actor
-bubbled
-becalming
-convulsion
-chivalrous
-brightest
-centralized
-beautifies
-amateurishness
-birthrights
-alligator
-circumstantial
-constructors
-conceptions
-arranging
-cart
-cent
-ager
-congruence
-carrot
-chariots
-cloudier
-captivity
-conquerers
-compartmentalizes
-condensing
-celebrities
-chalks
-accordance
-chilled
-conversations
-apples
-conceiving
-average
-blessed
-creator
-ant
-cling
-annoyer
-aviation
-cohesively
-correspondences
-boor's
-apprehended
-bessel
-both
-characterizes
-bards
-cots
-acculturating
-cemeteries
-carting
-alcohols
-bitterest
-ascetic's
-conducts
-caking
-airspace
-autocrats
-ashes
-chimes
-broadcaster
-commuter
-basket
-borderland's
-broadened
-boyish
-allegretto's
-ban
-bidder
-christen
-blessings
-bury
-arranged
-choir's
-apathetic
-boring
-aryan
-appearing
-binds
-cooperates
-bounces
-airspeed
-complicators
-adapting
-babbled
-agglomerates
-bedraggled
-addictions
-bolt
-calmly
-blur's
-boatload's
-anesthetic
-bugs
-colt
-completing
-boxer
-billers
-affronting
-absurdity's
-chides
-comparatively
-braided
-clipper's
-cot's
-calves
-articulations
-branchings
-attraction
-concatenates
-alligators
-cake
-boom
-crashing
-afar
-abler
-beamed
-adverse
-adrenaline
-agriculture
-beehives
-crankier
-courthouses
-advises
-consigns
-bisect
-azimuth's
-carpets
-arthropod
-brewery's
-commonalities
-altruist
-astride
-appreciate
-carved
-briefs
-admitter
-celery
-congregate
-clocking
-assassinated
-adding
-canvasser
-civics
-contemptuously
-calculates
-advisees
-bumbling
-algorithmically
-cloudy
-algebras
-addiction's
-cop's
-assurers
-confidently
-affector
-analyzers
-chimneys
-burdening
-antitrust
-admix
-avoidance
-choking
-coexists
-accustoms
-cellar
-anchovy
-constructor's
-confinements
-consequently
-accelerations
-accoutrement
-churchman
-biller
-affected
-brigades
-cremating
-corridor's
-bagging
-ah
-berating
-collective
-acuteness
-arrestors
-cab's
-border
-agitation
-animism
-arches
-alveolus
-cessation's
-averrer
-abash
-counterrevolution
-attesting
-animateness
-bawdy
-americana
-bloodstained
-applicator
-annotating
-annunciator
-clamored
-acting
-aerosols
-axiomatization's
-brags
-coalesces
-avocation
-combining
-crazily
-bravery
-burying
-adored
-airfield's
-accounting
-broadeners
-anise
-chimney
-added
-avenges
-bellicosity
-cranberries
-arsenic
-communities
-comparable
-bunkered
-architect
-alphabetically
-beautified
-apogees
-communist
-anatomical
-complexity
-accost
-autographing
-browsing
-ameliorate
-bookers
-bandaging
-clinical
-appellants
-counteract
-clairvoyantly
-bootstrap's
-canner
-boastful
-attainer
-ash
-beaded
-brake
-barest
-befriend
-burglarproofing
-allegorically
-bunts
-believes
-accession's
-buck
-boathouse's
-byword's
-anthracite
-accuse
-conjunction
-burping
-commandant's
-creativity
-affirming
-bark
-amuses
-balcony's
-auditors
-counsel
-clamber
-borates
-cowboy's
-bickered
-boors
-combing
-biting
-breeze
-crowder
-corn
-bloke's
-bombast
-bookstore
-blared
-bedlam
-carbohydrate
-coops
-bundles
-blistering
-antarctic
-anterior
-bilinear
-chocolate's
-context's
-alternating
-annoyance
-constancy
-ambivalently
-buddy
-brutalize
-bobbin
-alleles
-commotion
-attributes
-airborne
-creed's
-bolstering
-coaxed
-airframe
-breaker
-accept
-abashes
-attentional
-contributor
-comparability
-auscultating
-cocked
-computationally
-buffered
-career's
-analyzable
-absently
-courtyard's
-buildups
-apportioned
-balkanized
-annulling
-cremation
-buffetings
-conditional
-confided
-airliner
-bulldozer
-approaching
-anagram
-apollonian
-canaries
-bloat
-bluebird
-collision
-cool
-connectedness
-abasement
-artisan's
-avoidably
-clerks
-afflict
-briton
-corroborates
-cameras
-counted
-boldest
-burglars
-brutes
-brows
-abhorrent
-configuring
-averaged
-ace's
-buying
-abandon
-bayou
-cottons
-auditioning
-amplifies
-clippers
-brainstorm's
-alto
-brutalities
-bunch
-agricultural
-bursts
-blunting
-archer
-activity
-carefulness
-bedroom's
-concomitant
-balm's
-artificer
-barking
-breathy
-babies
-acacia
-bodies
-cap's
-criticised
-conversed
-crewed
-ascendant
-budgeting
-coroutine's
-charmed
-bellboy's
-conservatism
-butler
-acculturation
-conclusion's
-adapt
-cellist
-contempt
-adumbrates
-borrowed
-confounds
-allegiance's
-blabbermouths
-accrues
-captor
-coop
-baseballs
-cottages
-apartment's
-assertiveness
-assent
-artfully
-bagger's
-abolishment
-acetylene
-accessory's
-blackbird
-baptist's
-consist
-cavern
-buttock
-corporal's
-autoregressive
-bailiff's
-birds
-corder
-bracketing
-antlered
-barbiturates
-county's
-addicted
-agglutinated
-abashed
-competitively
-captains
-bloating
-accepts
-choose
-ashamed
-backyard's
-apiary
-contradiction
-balalaika's
-arctic
-broom
-anvils
-coffee's
-alliance's
-agitator's
-change
-adjusters
-cremates
-complexes
-bodyguard's
-burl
-antithyroid
-ambient
-airfoil
-apricots
-athleticism
-abjectly
-bankrupts
-answerers
-alternatively
-confronter
-breaking
-baronial
-cannibalized
-appetites
-breaded
-blackboard's
-battlegrounds
-cosine
-barrenness
-abbreviation
-budging
-boolean
-acrobatics
-again
-ashtrays
-clashed
-contingent's
-compulsion
-bedazzled
-collapsing
-comparison's
-businesses
-compassionately
-achievement
-buffering
-candlesticks
-austerely
-awls
-associate
-absolved
-annexed
-airway
-clipping
-counselors
-conscience
-attempters
-constructing
-biases
-cautioners
-comma's
-cosines
-char
-auscultates
-afire
-comely
-amity
-beverage's
-anew
-ballplayer's
-adulterated
-authorship
-alterers
-burdened
-attributive
-afflictions
-blinded
-barrier's
-attachment
-brotherhood
-bridegroom
-atoms
-cobweb's
-copes
-controversies
-complexion
-crawling
-atomized
-adjust
-accuracies
-concern
-cinders
-authorization
-appraisingly
-bladder's
-cooked
-cowers
-batter
-commissioner
-close
-burglar's
-allocated
-anvil
-aftershock
-abrogating
-chemistries
-advisable
-conduct
-committee
-blaring
-appalling
-braveness
-alertly
-artificialities
-brevet
-collision's
-arizona
-bower
-creamers
-awnings
-arsenals
-crane
-city
-contemplative
-catheters
-administrators
-attorney
-churned
-attractions
-columnation
-bobbed
-centipedes
-bostonian's
-apprises
-buries
-allege
-botulism
-adobe
-ambassador's
-covenants
-boon
-asynchronously
-bigness
-axial
-chaffing
-battleships
-ant's
-anthropological
-accent
-brushing
-brassy
-consumptions
-battleship
-absorb
-beckons
-brook
-connectors
-clinches
-accesses
-beaters
-archaicness
-bursitis
-chided
-bomb
-assimilated
-addicts
-convening
-arianists
-counting
-altar's
-confusions
-attachment's
-clipping's
-amazing
-corset
-bossed
-attach
-commandingly
-animatedly
-allegations
-assuages
-annulment
-compress
-aptitude
-absurdities
-autobiographic
-aspect's
-concentrator
-burgesses
-anagrams
-bedeviled
-assemblers
-convinced
-commentary's
-agglomerated
-biological
-callousness
-axolotl's
-atmospheres
-authoritarian
-cancer's
-above
-charting
-aldermen
-battler
-cistern's
-bouncer
-amassed
-conquest
-altering
-arrogantly
-brokenly
-comparator
-counsellor's
-attenders
-cackle
-criticize
-authored
-ably
-believed
-compelling
-accepter
-cleansed
-afflicted
-backslash
-computed
-almighty
-attache
-braes
-carriage's
-benediction
-brigadier's
-contemporariness
-boomtown
-amplitudes
-breakwaters
-clod
-catch
-bar's
-activist
-caves
-assenting
-camp
-attainments
-brotherliness
-continuances
-appearance
-applicator's
-browbeats
-banjos
-addendum
-became
-adduces
-armadillo
-brothel
-almanac
-courageous
-assault
-chunk
-coaching
-atheist's
-blunted
-aperiodicity
-congresses
-boastfully
-burglarproofed
-broadest
-bashfulness
-affect
-acne
-bottleneck's
-criticisms
-corrupts
-colonized
-closeted
-canonicalizing
-auditorium
-antenna's
-awfully
-anti
-consumes
-agonize
-algebra's
-championing
-blush
-bugger
-antagonize
-beethoven
-blase
-boycotts
-compensatory
-bugged
-boroughs
-anatomic
-batons
-arguably
-affricates
-appreciations
-cavalry
-alumna's
-arcing
-backpacks
-braces
-contextual
-coupon
-chillingly
-allocates
-abuts
-contribution
-commodity
-admonishing
-coolly
-cabinet's
-collapsed
-confessions
-adjured
-capriciousness
-chastising
-babe
-aerodynamics
-accepting
-concept
-contour's
-consequentialities
-birthday
-bankrupted
-birthed
-benefit
-concentrations
-azalea
-channels
-chestnuts
-contenting
-antedate
-censors
-contagious
-abbot's
-channellers
-apt
-commend
-avocation's
-admonition's
-abolition
-confederation
-carried
-clumsy
-coincidences
-bumper
-burr's
-bugles
-bribers
-attainably
-consume
-comma
-creativeness
-accuser
-bombs
-abbey
-baffled
-aside
-clip's
-appeases
-compass
-bundling
-abstractionism
-confide
-creases
-apropos
-confronted
-corrective
-concurrencies
-autocratic
-alien
-attending
-antagonistic
-broadcast
-asymptote's
-belied
-breasts
-contrapositives
-coiner
-accordingly
-cohering
-computers
-cow
-bibs
-ancestral
-controller
-attacker
-alerts
-coconut
-agency
-alerted
-alcoholism
-ammoniac
-actinometers
-acquitter
-bud
-cessation
-alleging
-centralizes
-articulators
-council's
-carvings
-arduously
-blown
-anode's
-arrogate
-bisects
-centimeters
-burgeoning
-course
-appointee's
-ascribable
-communicate
-contrivance's
-adoptions
-attune
-acres
-abyss's
-corporal
-certifiers
-analyze
-augusta
-bestseller's
-checkpoint
-coexist
-attainers
-argon
-bearded
-crudeness
-averaging
-brick
-adducing
-annulment's
-chicks
-blocked
-cisterns
-afoul
-affiliates
-briskly
-adhesion
-ascertainable
-appeasement
-blueprints
-agreements
-blindfolds
-communicator
-characterization
-annoyances
-breeches
-brushed
-clinic
-competes
-chuckled
-cradled
-balmy
-antisubmarine
-alternate
-armpits
-barn's
-conjuncts
-adhere
-allows
-counteracted
-appetizer
-capturers
-cleanse
-avant
-abbe
-corpse's
-arduousness
-badge
-begets
-contemplated
-caveat
-copiously
-athena
-aggrieving
-alibi
-accumulation
-basket's
-aftershocks
-bass
-conjuncted
-chaps
-brunch
-colonials
-bibbed
-clusters
-antagonizing
-constituencies
-combings
-bearish
-continuously
-adequacy
-brow's
-catalog
-alderman
-comedic
-chemists
-concernedly
-conceded
-alarm
-arced
-buckle
-confidingly
-coherent
-closes
-buffoon
-brace
-adjustably
-crackers
-contamination
-burgess's
-aerobic
-constitutes
-baptismal
-broadness
-blimps
-concatenation
-claiming
-bard's
-aerosolize
-adjoins
-copies
-coats
-boggle
-corroborated
-concreteness
-bill
-cautions
-bantam
-bearably
-armchair's
-birthright's
-cravat's
-cone's
-courtiers
-asunder
-bulletin's
-biopsies
-alley
-contrive
-blasphemies
-amuser
-ballerinas
-blushed
-causticly
-brandy
-blinkers
-complimenting
-crimsoning
-angola
-apprehensiveness
-bolster
-columnate
-byproducts
-berths
-accusal
-chubby
-arrived
-camps
-blemish's
-anaconda
-cook
-airfoils
-atlantic
-boosted
-converge
-availer
-appalachians
-coffin's
-boarding
-alga
-crouch
-columnizing
-consul's
-chastises
-angling
-apple's
-billiard
-attentiveness
-adroit
-apprehensible
-cereal
-blouse's
-browning
-bodybuilder
-coaxing
-assertion's
-connective's
-commemorated
-accountability
-crooked
-blips
-chandeliers
-aristocracy
-bangs
-coke
-abutment
-community
-calculus
-congregated
-crepe
-compromised
-airlines
-contributing
-contingencies
-coordinated
-alginate
-batted
-contender
-alma
-antagonisms
-accompanied
-airport
-administrator's
-appraisal
-breadbox
-condemnation
-backlog's
-available
-consequents
-crooks
-commonwealths
-barring
-channeller
-crucially
-archaeological
-charming
-adventist
-credits
-appetizing
-breads
-clients
-climbing
-aloneness
-abstractness
-appearer
-astute
-clockers
-antagonizes
-agonized
-bastard's
-conjectured
-aqueducts
-aureole
-boatswains
-conjured
-chauffeur
-complementer
-behold
-bustards
-bivouac
-cluck
-anus
-bless
-catastrophic
-bounty's
-allowed
-answer
-concealers
-brainchild's
-coercion
-buzzword's
-bordellos
-appertain
-applier
-couriers
-aesthetic's
-craft
-capacitances
-capped
-coupler
-category's
-anvil's
-conquest's
-checksums
-clucking
-bronchus
-acrimonious
-changeably
-accenting
-argued
-conditioning
-brewing
-backwardness
-cascaded
-atomize
-contours
-arianist
-apart
-conflict
-carefully
-banshee's
-conveys
-arbitrates
-amphitheater's
-amen
-alimony
-bound
-buzz
-courtroom
-apparently
-coalescing
-circulating
-amounter
-bypasses
-breadth
-choral
-completion
-arisen
-anticipating
-bilges
-contractions
-bedspring
-commune
-blacklisted
-beagle
-alkaline
-atolls
-carelessly
-blimp
-corking
-brevity
-alterable
-canada
-bear
-bluntly
-cartridges
-connoted
-countries
-corroborate
-consecration
-corrupted
-appreciating
-combatant's
-alkalis
-affecting
-blues
-casserole
-ballad
-bewitches
-common
-as
-because
-bathroom's
-anchorages
-beguile
-connect
-convenience's
-counteracting
-assorted
-care
-contains
-centimeter
-ancestors
-briefings
-busses
-churchyards
-breakable
-amortizing
-courthouse's
-click
-courses
-ajar
-county
-covet
-confidences
-capitalizer
-agog
-backtracking
-copious
-bestsellers
-chilliness
-bringer
-browse
-centipede
-bawled
-bricklayer
-breath
-assailants
-abysses
-command's
-characterizer
-calculating
-america's
-aurally
-contain
-alias
-commentators
-confounded
-appending
-accidents
-chatters
-coordinates
-bleeder
-blueness
-badger
-bolsters
-astounding
-capitalist's
-conservation's
-commences
-aimed
-bun
-comparators
-competition
-bauble
-backbend's
-bled
-assassinate
-chop
-anemometer's
-cobbler
-coldness
-audiometry
-affinity's
-amalgamates
-cowardly
-consolidating
-beads
-brackish
-bookings
-accuses
-bog
-compartmentalizing
-clutching
-calming
-collars
-clambers
-banqueting
-beaked
-authoring
-correspondence
-apostrophes
-affirmation's
-bespeak
-costing
-brought
-complainer
-battalions
-asymmetry
-boathouse
-canyon's
-awarded
-amplitude
-anarchical
-anticipatory
-bolder
-cooperatives
-caterer
-adviser
-balkanizing
-augur
-cannibal's
-balustrades
-attaching
-collector's
-commercials
-capaciously
-coincidence's
-bumps
-ascot
-bale
-blackmail
-baby
-aftereffect
-bloomers
-buttresses
-avenues
-climaxes
-aqueduct
-cater
-brainchild
-avail
-bypassed
-bowl
-california
-cements
-boxes
-brained
-bedevils
-captors
-acuity
-ascends
-breakthrough's
-assigner
-caner
-bequests
-ceilings
-axers
-bookshelf
-autistic
-celebrations
-axons
-chiding
-asterisk
-allophonic
-blindingly
-cherubim
-boaster
-confining
-anxious
-clowning
-advisement
-approach
-anesthetic's
-crescent
-alertedly
-birdbath
-beardless
-bras
-auspices
-choosers
-approval's
-afflicts
-corrosion
-arpeggio's
-bodyweight
-cranky
-battlefront
-affirmation
-churchyard's
-aeroacoustic
-anders
-adjustment
-baneful
-citation's
-acetone
-blend
-binuclear
-boner
-annotation
-announce
-claimable
-contemporary
-clothing
-acquitting
-choosing
-attacher
-bananas
-binaural
-arrestor's
-aches
-conclude
-collaborators
-await
-blaspheme
-bequeaths
-crows
-balconies
-begging
-conducting
-abstracts
-assignee's
-causations
-approximation
-articulated
-considerably
-apricot's
-afferent
-assertively
-bonding
-calms
-cranberry's
-cost
-captaining
-agenda
-corridors
-complaint
-christens
-aggravate
-countess
-arbitrators
-ascribing
-breech's
-bellwether's
-burglarized
-confinement's
-animating
-adjectives
-cannister's
-bemoan
-cleanest
-acme
-cheapest
-activities
-allophone
-boy
-belaboring
-captions
-compactor's
-actuator's
-befouling
-arachnid's
-computerizes
-compile
-absorption
-bridled
-absorber
-convicts
-birch
-alkaloid's
-cannot
-bacilli
-charitableness
-abated
-ceaseless
-beavers
-bookshelves
-commensurate
-appreciates
-basil
-cartoons
-aides
-buxom
-cages
-cantor's
-acceptances
-antiquated
-amalgamate
-babyhood
-beers
-conforms
-bouquets
-canner's
-baste
-cashed
-argue
-butcher
-backbones
-absolve
-crib's
-cafes
-abstracted
-book
-committees
-authentically
-conference
-antisera
-bourgeoisie
-attribute
-biddy
-autobiographies
-chivalrousness
-coverlet
-ambiguously
-calorie
-anhydrous
-alignments
-around
-archfool
-advance
-bedpost's
-affective
-contained
-amain
-bromides
-clogs
-bricker
-arduous
-consistent
-amidst
-confess
-complain
-anniversaries
-coasting
-cobwebs
-aries
-benchmark
-aviaries
-bombard
-boxers
-ashtray's
-assyriology
-blaze
-ablative
-chaos
-burro
-arguer
-ashamedly
-crier
-allocator's
-aggressively
-carts
-advisory
-airship
-alkali's
-backup
-chaining
-continue
-cartoon
-circumference
-breadwinners
-autonomy
-banking
-armored
-cabin
-chunks
-antigens
-blistered
-airers
-breakaway
-belief's
-belays
-coveting
-auburn
-careful
-anybody
-bumbled
-cautious
-adopter
-ballplayers
-anteater
-citadel's
-avails
-agent's
-caliphs
-bridgehead
-already
-caterpillar's
-coachman
-centralizing
-alphabet
-concede
-barbell
-breadboard
-ballast's
-activators
-attendance
-blandly
-calculator
-codeword
-addressee's
-avenue's
-alcoves
-alternately
-admonishes
-concentrate
-crossbars
-adjoining
-basset
-carbons
-beast
-blonde
-castle
-clarification
-bitch's
-abrasion's
-books
-amputate
-bicycler
-aphonic
-arraigns
-acquiesce
-buster
-chaperon
-advisements
-buyer's
-attack
-birthdays
-blazed
-confuser
-crag
-ballet
-airports
-bison
-counterexamples
-arteriole
-colony's
-adamantly
-blunders
-chivalrously
-adult's
-authors
-amplifiers
-counterfeited
-complicity
-astrophysical
-axolotl
-bash
-battleground
-butterfly's
-axioms
-allegory
-blitzes
-blindfold
-bufferrers
-approximating
-byways
-computations
-alight
-avoiding
-assurance's
-barrages
-canonicalized
-callously
-auditing
-authenticating
-bag's
-asters
-artistic
-bonanzas
-applaud
-certainties
-auto's
-concession's
-cascade
-chubbiness
-churchyard
-afternoons
-antigen's
-baron's
-amphibian
-banister
-capitalize
-approval
-appropriated
-bureaucrat's
-covets
-cloisters
-circulate
-bivalve's
-beta
-collector
-among
-cane
-birdlike
-attenuating
-conjunctions
-appliance's
-coral
-crucify
-abnormal
-combined
-classroom
-buckskin
-commissions
-abolishments
-arching
-croak
-americium
-associates
-car's
-assuringly
-agreer
-anticoagulation
-closure's
-corkers
-attend
-alphabet's
-awakening
-composedly
-attracted
-construed
-cricket's
-applicability
-autonavigator's
-chloroplast's
-ashen
-beggars
-corporation
-another
-conflicts
-bootlegs
-archeologist
-alcove's
-agitates
-cargoes
-creditor
-cops
-advisably
-coronation
-bourgeois
-crochets
-cropper's
-cramp's
-adulterer's
-corroborations
-changing
-combinatorics
-calm
-comprehensible
-blooms
-coolness
-copying
-blacksmiths
-commodore
-compulsions
-clump
-afterward
-crucified
-brooder
-buckets
-accelerating
-accented
-boat
-adventitious
-baseline's
-courier
-calamity's
-atoll's
-brutalizes
-bundled
-chairperson
-cheeses
-continuation
-celebrating
-apologists
-behest
-bumpers
-consonants
-circulation
-betraying
-commuting
-breezily
-circumstance
-coughing
-benefiting
-conquerors
-chemically
-commencement
-adjustors
-angel
-congratulate
-conspired
-causally
-bud's
-conquers
-augmented
-bereaving
-advisor
-articulation
-angler
-admission
-bide
-competitors
-amusement's
-collecting
-adder
-arithmetized
-cheek's
-apostrophe
-blockages
-clockwork
-bubbly
-apricot
-adjudicated
-banter
-amused
-breacher
-bracketed
-aimer
-comprehending
-bunkers
-canton
-arcane
-absent
-capitol
-consequence
-cognitive
-abjuring
-clever
-coronet
-anathema
-artichoke
-controls
-credulous
-acid
-crawled
-coupled
-boomtowns
-aspen
-acted
-anyhow
-burdensome
-backdrop's
-apocalyptic
-cornerstone's
-cautiously
-blisters
-conveniences
-arbor's
-accessories
-alleges
-clubs
-accompaniment
-blazes
-annually
-clique's
-beamers
-ballgown
-autumnal
-acreage
-conjunct
-balances
-consoling
-canvas's
-competent
-aggrieves
-although
-afraid
-clearly
-cognizance
-acoustic
-colleague
-causing
-absences
-closers
-airs
-cinder
-adversaries
-altruistic
-brews
-ceremonially
-appraisal's
-commissioners
-army's
-assists
-acceptor
-comparison
-cooling
-conveniently
-couching
-changes
-clinic's
-confronting
-adjunct's
-blandness
-alternates
-bunter
-consequent
-clean
-autos
-accumulators
-carver
-aprons
-awful
-bobbins
-blasphemy
-assuming
-abscess
-assemble
-cabinet
-atomics
-blacklists
-audacious
-assay
-anthropology
-barnstorm
-awl
-bumping
-assembles
-capture
-compensates
-coverable
-amend
-array
-continually
-absented
-cigarette
-antiresonance
-backspace
-branched
-appellate
-courtroom's
-alienated
-austerity
-cement
-asked
-antelopes
-cottager
-bluebonnets
-booze
-amendment's
-backslashes
-begun
-bijections
-cafe's
-boatload
-collect
-appeals
-belittles
-befit's
-beauty
-arrogated
-academia
-contagion
-blemishes
-coverlet's
-comfortability
-antecedent
-controllably
-congressman
-complicate
-coincide
-arrears
-clumped
-credited
-buffoon's
-catholic
-accompanist
-beauty's
-aster's
-blatantly
-bothering
-bewilder
-canceling
-carbonizer
-accentuation
-backstairs
-anticipations
-bestowed
-civilian
-blooming
-blunts
-airlocks
-argo
-blueprint
-aristocrat
-cakes
-complements
-ale
-camping
-army
-adrift
-bengali
-barely
-blasphemes
-briefcase
-brooches
-ailments
-blazers
-crevice's
-bankrupt
-archiver
-articulator
-alphabets
-bonds
-colliding
-candidate
-cashier's
-bellwethers
-airstrip
-announcers
-calendars
-corrupter
-aqueduct's
-axiom
-bathing
-blusters
-ascribed
-admittedly
-angrily
-analytical
-contraption
-convertibility
-abysmal
-cathedral's
-aversion's
-algol
-articulately
-breveted
-bickers
-chatterer
-adoptive
-bijectively
-cloudiest
-coarseness
-carted
-cocktail's
-capacious
-anion
-buffoons
-bleeding
-bedrock
-adventurer
-compositions
-camouflages
-brittle
-chip's
-aloe
-chorus
-cargo
-critical
-biographer's
-abject
-blasphemousness
-charmer
-betray
-blacking
-awoke
-allele
-bags
-claimant
-clover
-biographies
-confound
-advertises
-crafter
-cripples
-bygone
-concentric
-couldn't
-contentions
-acrid
-costume
-aft
-aesthetic
-bandits
-adducts
-constellations
-coffer's
-created
-commercial
-art's
-cookie's
-ammonia
-adjunct
-articulateness
-congratulated
-crags
-brandishes
-annual
-byword
-affection's
-college's
-aboriginal
-bikini
-buttering
-allotter
-console
-advent
-activates
-beverage
-april
-acceptable
-barrel's
-boys
-attractor
-azimuth
-critics
-ballooner
-aren't
-adulterating
-criticise
-abeyance
-automatically
-collaborative
-capabilities
-crawls
-anomaly's
-climaxed
-animately
-aroma
-belie
-attires
-argumentation
-baseboard
-bluebirds
-cactus
-byproduct
-balancer
-beholder
-conservationist's
-betrayer
-agony
-accusingly
-convict
-coaxes
-breeds
-agitated
-championship
-brevets
-auscultate
-counselling
-cornerstones
-america
-canoes
-aspirator
-compensate
-antiseptic
-bereave
-absinthe
-compose
-collide
-alabamian
-candid
-civilized
-clamps
-authoritarianism
-colonist
-bugging
-bins
-abashing
-battlers
-canning
-berate
-assembler
-amateurish
-boasted
-angriest
-bluffs
-colonize
-balcony
-bleat
-bustard's
-attenuate
-contagiously
-bicep
-babel
-beatniks
-brush
-analogy's
-audiologist
-assessment's
-camera
-arbitrary
-alleyway's
-concession
-constructions
-accompanies
-accretion's
-aroused
-charcoaled
-belated
-bottom
-bloodshot
-bisques
-advocate
-arabs
-cathodes
-adamant
-challenge
-absurdly
-abolitionist
-cleavers
-bludgeons
-bassinet
-clause
-coiling
-cask
-boob
-azalea's
-afghanistan
-carriages
-blade's
-bobby
-asinine
-acclaiming
-absorbed
-blacken
-cheating
-bootleg
-anonymous
-addict
-astonishes
-awry
-adequate
-categorization
-casks
-blaster
-aspirants
-abscesses
-airing
-assumptions
-capitalists
-board
-asynchronism
-body
-aye
-contraction
-athens
-arsine
-cohabitations
-below
-bows
-aviator's
-ampoule
-connective
-adapter
-authenticate
-blackboard
-brilliant
-appoints
-attics
-conquer
-boning
-comestible
-camped
-blonds
-aisle
-coals
-billboards
-characterizers
-crow
-clout
-admirer
-actuarially
-abstruse
-accessing
-bonfires
-clenched
-characteristic
-catching
-chars
-canons
-barrier
-championed
-butterflies
-completely
-calendar
-artwork
-abjections
-burgher's
-correlates
-arrivals
-accepters
-circuses
-breadboards
-accomplishment
-analyzed
-appropriates
-cancel
-bordering
-aperture
-civilizing
-assortments
-blackest
-blitz's
-copy
-commenced
-admirers
-cheers
-croppers
-cliff's
-circumstance's
-bibles
-buttressed
-consecutively
-birefringence
-automaton
-cheerless
-chopping
-ballooned
-convent
-acknowledgers
-appointing
-belies
-comeliness
-bangle's
-communication
-bisector
-avocations
-clique
-brainstem
-campusses
-allocators
-bramble's
-assaults
-commemorate
-appendix
-agent
-apportioning
-bottled
-artifact's
-block's
-archery
-bagatelles
-candies
-catched
-cognitively
-creepers
-concentrated
-bout
-balustrade
-abodes
-carrying
-confirming
-cannibal
-chinners
-carbonate
-anguish
-butt
-colons
-ablated
-corporation's
-cock
-convincers
-beret's
-bluish
-compressive
-authenticates
-commemorative
-bureaucracies
-coinage
-coach
-assigning
-concentrators
-capitalizing
-appraisals
-belaying
-candy
-blossomed
-bricks
-atonal
-analogue
-caters
-barbaric
-applique
-clink
-audio
-actress
-assyrian
-apprehension
-conversation
-apsis
-bedevil
-comics
-affricate
-comings
-buttress
-angering
-buckboards
-bombed
-adversely
-adequacies
-commended
-causeways
-adherers
-codes
-aquaria
-ape
-bulks
-compactly
-brainwashes
-bleats
-commandants
-conditionally
-adjourns
-clobbering
-allowances
-buildings
-complemented
-blanker
-algeria
-brief
-creak
-adductor
-categorizer
-approacher
-argument's
-clocked
-bedazzle
-cause
-coordinator
-buildup
-countenance
-abhorrer
-backtracked
-bogus
-closer
-broilers
-chirps
-adjournment
-belles
-bitingly
-befogged
-contexts
-amorous
-breeding
-abortions
-blockage's
-alternatives
-bouncing
-beryl
-ballistics
-banters
-carpenters
-auction
-bowdlerizing
-brazen
-bonuses
-circulated
-adultery
-archival
-bears
-baptized
-burglaries
-borrowing
-barbarous
-casher
-adolescents
-atrophic
-busily
-aerating
-coatings
-athenians
-casing
-consuming
-alphanumeric
-beaches
-bisection's
-conjecturing
-aspirate
-biography's
-accompany
-bureaucrat
-broomstick's
-colony
-coalesce
-clock
-bequeath
-collaborates
-belonging
-configured
-burlesques
-anode
-consenter
-bug
-counterpoint
-counts
-bangladesh
-analogical
-accident
-bulky
-affinities
-abysmally
-boorish
-assiduously
-cannisters
-autocollimator
-bassinet's
-barrelling
-blurts
-carbonize
-candle
-act
-addressees
-constraints
-boast
-complaining
-coziness
-avocado
-coolest
-blank
-beadles
-anytime
-covetous
-appellant's
-angers
-academies
-ageless
-chased
-constitution
-consonant's
-boosting
-ascetics
-aerosol
-apse
-blushes
-clang
-confers
-confidentiality
-coolie
-colon's
-chickadees
-badminton
-argonaut
-constituting
-aloha
-contracts
-broomstick
-brackets
-attendant's
-connection's
-conciseness
-abstractor's
-composes
-chaste
-assures
-conjuring
-barbital
-bunion
-bases
-clowns
-barrelled
-audience
-auctioneer
-complexly
-aviator
-conjectures
-backscatters
-cheerfulness
-communicating
-agreement
-bricklayers
-bilabial
-abstruseness
-cobol
-cooperating
-admit
-blundering
-accelerates
-assaulted
-concealing
-anachronism
-bowels
-butane
-anniversary's
-converts
-convoyed
-climates
-barriers
-clubbing
-additives
-bask
-confessing
-caravan
-colonizes
-continuous
-cheerlessness
-boggled
-armpit's
-bridgework
-allegro
-cricket
-cannon
-adoption
-clanging
-auscultations
-billowed
-alphabetize
-airlift
-appointee
-boyfriend
-chaotic
-corrections
-bonus
-contrasted
-convulsion's
-confessors
-adumbrating
-autocrat's
-coronary
-authentic
-barley
-brawling
-aegis
-appends
-bolshevism
-charted
-applicant
-aileron
-considers
-chin's
-alkyl
-amendment
-boulevard's
-avian
-breather
-canyons
-cannon's
-apportion
-badgered
-augers
-advisers
-censuses
-beveling
-aught
-arthogram
-anonymity
-appliance
-atmospheric
-anesthetizing
-ambulances
-blustering
-burnt
-chestnut's
-collects
-aliment
-anxieties
-championship's
-channeled
-arrival
-amassing
-corpse
-bedtime
-blackbirds
-cats
-constants
-chemistry
-brewery
-brother's
-boasts
-accentual
-bellwether
-bely
-courted
-baroness
-configure
-collection
-aviary
-achieves
-belfry's
-beech
-baseman
-bacterial
-contestable
-blond
-contracting
-comparably
-consultation's
-booster
-conspiracies
-belief
-candidate's
-boardinghouses
-connectivity
-check
-crazy
-collided
-assistant's
-critic
-bilateral
-cheapening
-appalled
-autopsy
-balled
-abnormally
-acquires
-aloofness
-backwaters
-combative
-computerizing
-craters
-contributorily
-behaved
-comers
-axiomatizations
-analogously
-banjo's
-cleanser
-capitalizes
-chamberlain
-aggregates
-amenorrhea
-begins
-condone
-cleaved
-bustard
-adsorb
-airedale
-bridles
-audited
-could
-amour
-checkbooks
-admiring
-arrested
-commerce
-asbestos
-can's
-clamping
-bathers
-acknowledgments
-census
-acrobat
-bargains
-apogee
-creaking
-busboy's
-additional
-chants
-circumvents
-afloat
-anyplace
-alumnae
-anions
-classroom's
-ballerina's
-convents
-angered
-climbers
-citation
-cools
-clamor
-capaciousness
-beatific
-abrades
-advocating
-coverings
-claims
-brethren
-advertised
-atrophies
-coffer
-beagle's
-brazenly
-bitterly
-clergyman
-braiding
-compressible
-convicting
-agreeableness
-antithesis
-cogently
-botanist's
-bidirectional
-bewilders
-airlock
-costumer
-blamelessness
-agglutinins
-catalyst's
-allocation
-annunciates
-borderings
-accomplishes
-confronters
-clinically
-breadbox's
-canvassed
-communicative
-coercing
-backpointer's
-bramble
-congregations
-crave
-courtesy's
-cocoon's
-admitting
-chieftains
-acclimate
-consequences
-cones
-contradict
-axolotls
-contractual
-artist
-atrociously
-consecutive
-berated
-bluing
-attacks
-choruses
-blatant
-balance
-amplifier
-assist
-analyst's
-ambler
-conveyance
-compromising
-baffler
-corridor
-bed's
-condoned
-boulevard
-anomie
-averages
-basics
-apologia
-cabbages
-concretes
-alcoholic
-aliased
-chocks
-balsam
-collies
-censor
-arouses
-conundrum's
-academically
-bent
-codings
-coastal
-allots
-acclaim
-citations
-cantor
-circularly
-boarder
-caribou
-biologist's
-cowling
-connects
-chasing
-bootstrap
-backscatter
-abstractly
-corrupt
-alleviating
-biasing
-abrade
-arraignment
-beaten
-blanketing
-compactness
-adage
-coincided
-borate
-bra's
-concepts
-bootleger
-christian
-argos
-basal
-abate
-campuses
-abridging
-confusers
-cabin's
-audition's
-amphibians
-attractively
-adhesive's
-ascendency
-beforehand
-ache
-brokers
-bowler
-criminally
-american's
-chock's
-artillerist
-appropriation
-characterization's
-artifices
-annoys
-constituents
-bottle
-beaned
-consisting
-beholding
-ceremony
-carpeted
-absolutely
-anorexia
-accredited
-azaleas
-amaze
-commit
-afflicting
-contriving
-adventure
-blood
-blabbing
-absoluteness
-appreciable
-approachers
-bumptious
-behavioristic
-anticipates
-adults
-barnyard's
-banging
-banana
-bilge's
-aware
-coheres
-bronchi
-commissioned
-arrogation
-confines
-core
-attenuation
-afterwards
-clearing
-applies
-alphabetized
-cemetery's
-campaigning
-abolishes
-brig
-cheer
-combers
-backtracker
-clinker
-clouds
-clog
-berries
-advising
-childish
-clobbered
-bride's
-astrophysics
-canker
-concatenate
-bite
-chagrin
-bodybuilders
-calamity
-admiralty
-councillors
-competitive
-assessments
-copper's
-cabling
-casket
-conducted
-backplane
-boyfriends
-bingo
-broader
-confiscates
-communicated
-baton
-cocktails
-albanians
-boardinghouse's
-brats
-akimbo
-categorizers
-comparator's
-blackbird's
-accidentally
-companion's
-clippings
-accosted
-bell's
-burly
-aggregations
-boathouses
-airmails
-abreactions
-changers
-carbon
-cleaners
-bookkeeping
-correlations
-backer
-conclusions
-brainstem's
-anecdotes
-chateau
-cogitating
-amphibious
-compounded
-completeness
-comptroller's
-boatswain's
-bolstered
-acquiescing
-actors
-calorie's
-adaptability
-abstractor
-bimolecular
-belly's
-automobile
-automotive
-analyticities
-awesome
-colonizer
-approximated
-chemist
-coronet's
-classmate
-anteater's
-altars
-adulthood
-amid
-assails
-blizzards
-corroborative
-biographer
-compartment
-blooded
-bipartisan
-bluff
-aloof
-bronchiole
-clincher
-congratulations
-ablation
-caught
-collier
-chooses
-antidotes
-artery
-clearance
-civility
-basketball
-auscultated
-behaviorally
-crowning
-autobiographical
-cheaply
-brutally
-agonizing
-clerk
-comprising
-baller
-confuses
-acquiesced
-astonishingly
-birthplace
-covered
-chopper
-combinator
-benignly
-bedside
-blasts
-billboard
-appraise
-aboveground
-comforter
-credulousness
-battlefield
-barefoot
-cleverness
-apparatus
-bartering
-bromine
-aerodynamic
-crabs
-chains
-airflow
-allegrettos
-armchairs
-blacklist
-approvals
-bait
-collections
-antecedent's
-airbags
-casted
-content
-conferrer's
-crouching
-coughs
-canal
-amphetamine
-augustly
-bedraggle
-arithmetic
-cataloger
-alluding
-credulity
-coffees
-crueler
-beautifully
-caresses
-correlative
-consul
-criticizing
-couched
-baths
-alchemy
-bargain
-accomplishments
-conveyer
-benevolence
-broil
-chilling
-axed
-attire
-collisions
-categorizes
-cited
-aeration
-accommodating
-coordinations
-boxcar
-cattle
-bullion
-afternoon's
-captures
-afghans
-comets
-component's
-ark
-bounds
-adjusting
-bravely
-capability
-chap
-absolving
-aspirating
-arcs
-conspires
-collaborated
-admonishment
-astounds
-brasses
-compromise
-changed
-consumers
-connoting
-buttonholes
-cordial
-anionic
-chastisers
-archive
-alleviate
-burglarize
-acquainted
-copiers
-cashers
-antisocial
-creations
-bookie's
-censure
-beadle's
-banded
-circled
-bulged
-cheapness
-attorney's
-chewer
-bookshelf's
-councillor
-assertion
-broom's
-contemplations
-club's
-balkans
-cherubs
-alas
-chair
-apologizes
-compartments
-beyond
-aptly
-censured
-allegros
-boosts
-card
-arithmetizes
-attainment's
-arrester
-anding
-asker
-compatibilities
-confidentially
-commissioning
-cleaner
-aversion
-cooperative
-battalion's
-cemented
-charity's
-conceited
-capable
-anymore
-computing
-aping
-chiefly
-affair
-beaners
-allying
-caption's
-antipathy
-causal
-abyss
-botchers
-burglarizing
-confidant's
-activator
-continent's
-census's
-brat's
-antagonism
-bedspring's
-antiserum
-charge
-connector's
-alike
-believable
-belfry
-cast's
-bureaus
-beneficiary
-abolisher
-artichoke's
-broadly
-concurrent
-alteration
-bookies
-crafts
-bays
-ass
-bouquet's
-ave
-chords
-crazes
-anemic
-appoint
-beets
-billing
-contest
-assassination
-allot
-brindled
-acute
-absolves
-adsorbed
-auxiliaries
-belatedly
-businesslike
-assassinates
-bookkeepers
-bevel
-adders
-automate
-archangels
-breakfasted
-changeability
-contested
-cradles
-combatants
-besieging
-certainty
-attempts
-bankrupting
-compiler's
-complications
-banquets
-ancestor's
-ail
-abbreviating
-compacter
-approvers
-acknowledges
-comically
-almonds
-counsellors
-calmness
-assailed
-crane's
-baser
-big
-corruption
-circuitry
-briefness
-community's
-banquetings
-alms
-bass's
-bellowing
-adoption's
-blockading
-compellingly
-builders
-befallen
-bombproof
-cartons
-chore
-crimson
-anther
-clucks
-assemblies
-beatitudes
-aspiration
-compels
-angst
-balancing
-bowstrings
-bayonet's
-butte
-biomedical
-casualness
-accolade
-blackberry's
-bunched
-affright
-clung
-burlesque
-bare
-corrected
-arbitrate
-cropping
-coherently
-bloodhound
-circularity
-courtesies
-articulating
-concluded
-analogy
-brutalized
-airmail
-cooperator
-cousins
-centralization
-bibbing
-beside
-bravo
-abductors
-cars
-bovines
-bump
-absconding
-chins
-chasers
-boundary's
-antecedents
-awed
-counselled
-aback
-attenuator's
-blazer
-bettered
-awaken
-abreast
-beagles
-artisans
-buckled
-credence
-control's
-bewhiskered
-calloused
-breathe
-collaring
-blossoms
-bring
-actualities
-bivalves
-animals
-cowboys
-constituency
-affordable
-acrobatic
-attiring
-boatswain
-concurrence
-abrasions
-babel's
-cowerers
-chiffon
-bostonian
-criterion
-blinds
-cased
-affections
-conditioners
-clutter
-accrued
-attractors
-botcher
-compunction
-bludgeoned
-censored
-allah's
-chronic
-burrs
-commodity's
-appraiser
-asserters
-cheaters
-besting
-anchorite
-combine
-afforded
-cigarette's
-bathrooms
-apostles
-chloroplast
-bootlegging
-bibliographical
-beans
-bylaw
-benefited
-brochure's
-cordially
-brashly
-beastly
-bologna
-alderman's
-burning
-billow
-convert
-buffaloes
-comparatives
-assistances
-camouflaged
-announcement
-bobwhite
-brawl
-adducted
-cavern's
-affectation's
-bandying
-brunette
-architect's
-aphorisms
-cremate
-bray
-billed
-conception
-battlefield's
-bandaged
-broaches
-bazaar's
-beatification
-bigotry
-clergy
-abstains
-befits
-bantering
-conceivable
-attachers
-analogies
-bimonthly
-august
-additionally
-confirmation's
-ballooning
-cardboard
-belle's
-counterparts
-candor
-bishop
-comprehension
-affronted
-bravura
-courting
-antidote
-buggies
-arisings
-appendix's
-bright
-categorize
-cooking
-agnostic's
-billets
-amok
-bewitching
-audiograms
-column's
-bussed
-checkbook
-alteration's
-atherosclerosis
-broached
-based
-cacti
-boardinghouse
-bowdlerized
-anchoritism
-achievement's
-bald
-cover
-codifications
-capacitor
-brashness
-causes
-acyclically
-argument
-boarders
-audiometer
-compute
-contribute
-crisply
-bitters
-circumvent
-assailant
-bosun
-buyers
-alibis
-blurting
-coasts
-bivouacs
-arrogating
-albanian
-attempted
-acquisitiveness
-applauding
-alfalfa
-cantors
-canonicalizes
-alkaloid
-bruising
-associativity
-budgetary
-carbolic
-clashing
-buffalo
-acorn
-analyzing
-backyards
-comedian
-betwixt
-aces
-chartered
-additivity
-becalm
-combat
-characterizations
-clinics
-bulbs
-bloc
-amenable
-civilian's
-breech
-attainment
-bounding
-compiler
-cotyledons
-billboard's
-caper
-aphasia
-chester
-combats
-biddable
-articulates
-caps
-assignees
-bifocals
-beady
-chinese
-assertions
-allegation
-championships
-accrue
-containment's
-croaking
-classifying
-annum
-brightened
-bits
-appointer
-besieger
-citizen's
-cerebral
-canto
-bakers
-capitol's
-authorizer
-blockaded
-anodizes
-alarmed
-buttressing
-attenuates
-bumptiously
-chronological
-colleges
-coward
-contraption's
-abstractions
-controversial
-boric
-bids
-agents
-backpointer
-bumped
-bottoms
-bowlines
-captivated
-article
-cliche's
-chases
-choker
-bremsstrahlung
-consult
-adjudged
-auctioneer's
-covers
-accurateness
-clues
-bugler
-bareness
-cedar
-alleviation
-anesthetically
-backpointers
-arched
-administered
-arrowhead
-continues
-asks
-confessor's
-allure
-backlogs
-childishness
-appointive
-covering
-conscience's
-bellows
-blanked
-considerations
-appalachian
-aerate
-budged
-city's
-accordion
-cliche
-collectors
-comprehensive
-boomed
-chariot
-baffling
-bunkmate's
-bumbles
-contaminating
-corroborating
-applications
-bursting
-cabbage
-befalling
-acquittal
-compromisers
-components
-arpeggio
-brothel's
-credibility
-begrudge
-confirmation
-academy
-appertains
-calibrates
-bureaucrats
-bawl
-costuming
-biography
-adoration
-cloaks
-aggregating
-business
-aphorism's
-carters
-admixture
-coexistence
-anomalously
-adapts
-amide
-affiliation
-capillary
-biscuit
-brainy
-bellhops
-chartings
-cohered
-austria
-champions
-basin's
-cascading
-consultants
-bison's
-admixed
-arithmetically
-clothed
-betterments
-conspirator's
-addition
-adolescence
-bolsheviks
-abominable
-breathless
-cozy
-arouse
-bumble
-about
-apace
-astronaut
-asteroid
-cable
-crab's
-beachhead
-assets
-analyses
-bisection
-coconuts
-alleys
-armament's
-bloodstains
-arpeggios
-apologist
-blithely
-anabaptist's
-beadle
-channelled
-confuse
-annoy
-beautifiers
-cheats
-clenches
-amuse
-bewail
-constitutional
-birth
-appendixes
-amazed
-berry's
-bilingual
-blustery
-amplification
-clogged
-blackmailing
-breakables
-adduct
-bondsmen
-conferred
-codewords
-bequeathal
-abundantly
-banner's
-atrocity
-congested
-closely
-absolution
-concatenations
-anarchic
-crag's
-communicators
-cavities
-comptrollers
-backstage
-bewailing
-charcoal
-conveyances
-collar
-bores
-briefest
-comments
-awning's
-associator's
-antarctica
-correspondingly
-bidden
-ad
-clings
-bit's
-apollo
-bulldogs
-chateau's
-amounting
-cogitates
-bellhop
-bookish
-bout's
-cannister
-bicep's
-asses
-beef
-battlefields
-consort
-auspicious
-breezy
-buried
-beverages
-approximates
-conduction
-bleakly
-blanketers
-ascertained
-absentminded
-bolivia
-births
-behave
-bilk
-breaths
-charter
-abstaining
-appareled
-boulder's
-breadwinner's
-correct
-accessed
-befitted
-adulterer
-axe
-activation
-betrothed
-asymptote
-bullet's
-clusterings
-baud
-bustling
-ballplayer
-constraining
-cleared
-brown
-affirmed
-agencies
-churches
-backyard
-burntness
-bronchioles
-charmers
-backscattered
-abridgment
-claw
-blow
-adjourning
-constantly
-brightens
-autobiography
-cards
-bypassing
-alcibiades
-concurrency
-chuckles
-bests
-belligerents
-adjustments
-bolshevik
-cabins
-astronomically
-cartridge
-boxcars
-boned
-bottomed
-burgeoned
-adjourned
-apprenticeship
-chastiser
-breached
-boycott
-butchered
-coordinating
-cottage
-brainwashing
-confinement
-bandies
-absentee
-collapses
-cruel
-along
-alloy
-convoying
-assignment's
-crisp
-ambidextrously
-blindfolded
-chilly
-condenses
-avers
-broiler
-anesthetics
-beaker
-cholera
-brag
-coffins
-cranked
-allocator
-brutality
-acquire
-blushing
-briar
-abolish
-crossovers
-broiling
-consolers
-beatify
-almanac's
-cooled
-commencements
-clasp
-committing
-condemnations
-altar
-by
-bombastic
-confederates
-bong
-concerted
-compilers
-counterproductive
-brig's
-accurate
-avidity
-cleavage
-blame
-conceive
-assessor
-consolingly
-concise
-computes
-alliance
-clucked
-axon's
-annunciating
-baseball's
-allusion
-brays
-auras
-blond's
-bronchitis
-ciphers
-blowing
-broth
-canonically
-baseness
-byline's
-appetite's
-colonists
-condensed
-cawing
-beaning
-broadening
-colonist's
-apocrypha
-chauffeured
-cored
-branding
-carrier
-assessed
-collegiate
-chirped
-accounted
-clubbed
-antibodies
-behalf
-alphabetizing
-conqueror
-alpine
-budgeters
-casements
-appropriate
-compliments
-cast
-accountancy
-cathedral
-conserve
-accorders
-arbitrarily
-cowing
-bars
-bagel's
-climax
-attention's
-cautioning
-centipede's
-almost
-abstractionist
-carpenter
-containing
-arab's
-courtesy
-carton
-accelerated
-bowman
-boastings
-banal
-bucking
-accomplishment's
-classification
-baldly
-abruptness
-calibrations
-blocs
-biking
-assenter
-adversities
-compartmentalized
-chemical
-attic
-audiogram's
-applauds
-crests
-bad
-bounce
-accelerators
-contemptuous
-attentions
-cancellation
-battles
-aging
-advantages
-anthologies
-answers
-bruised
-castes
-any
-coped
-arcade's
-adaptively
-arsenal's
-confessed
-controllability
-acceptor's
-abrogated
-abutted
-amusingly
-apology
-broils
-court
-boundaries
-bode
-collie
-adiabatic
-ambitions
-charged
-awfulness
-consorts
-botanists
-blurring
-absents
-batten
-backwoods
-breaks
-certified
-chattering
-admitted
-bathrobe's
-analogous
-corporacy
-bijection's
-combatant
-checked
-condition
-amoral
-bayed
-bedroom
-chanting
-antics
-charity
-blip's
-biped
-brilliance
-catchers
-booted
-anabaptist
-clothe
-comforted
-complaints
-coacher
-admissible
-bang
-concisely
-cookery
-capita
-assurance
-codifying
-benchmarks
-aunts
-commentaries
-anon
-applicators
-constructor
-associated
-abuses
-choicest
-confiding
-antislavery
-apron
-ashore
-cheerfully
-betterment
-administration's
-campaign
-cremated
-ambulatory
-bleacher
-afterthought
-barkers
-choir
-crossly
-conducive
-cache's
-battery
-actinium
-countryman
-cajoled
-appeasing
-beamer
-cleaves
-anthem's
-clearing's
-cooperated
-barker
-crowing
-apprising
-accusation's
-beginning
-associator
-booking
-caved
-amicable
-codify
-clairvoyant
-bevels
-becalms
-brawn
-bunkhouse's
-arms
-antiredeposition
-belt
-antiphonal
-cried
-brae's
-bridal
-acronym
-clay's
-checkers
-auxiliary
-bind
-compares
-agilely
-askers
-blankly
-antagonist's
-bimodal
-captivation
-creditable
-concentration
-calling
-bartender's
-autopsied
-correspondent's
-carnivals
-abjure
-bystander's
-bungle
-chanticleers
-conceding
-burghers
-boards
-accessions
-compensations
-arabian
-churn
-crowed
-centering
-abnormalities
-courtier's
-congregation
-aberrant
-annexing
-blockhouse
-anthropomorphic
-bedder's
-abutting
-conundrums
-affiliated
-cancellation's
-bolts
-ballgowns
-augmenting
-bureaucracy's
-bootlegged
-audiometers
-blueberry
-affliction
-appreciation
-codifier
-amasses
-countering
-crackle
-canoe
-consuls
-breathes
-broiled
-amalgam's
-bodes
-ballooners
-coating
-corollaries
-amphibology
-agenda's
-chafing
-alcoholics
-accredit
-anisotropy
-anchovies
-carriers
-acceptors
-betrayed
-buttocks
-busy
-bunny
-cropper
-accreditations
-bumblebee's
-adhesives
-civilize
-accedes
-abroad
-arch
-crept
-cotyledon
-alphabetic
-braille
-amateur
-adjure
-ascertaining
-budge
-adulterate
-additive's
-cardiac
-born
-brewed
-borneo
-bun's
-blue
-cackled
-acclimates
-airline
-blinder
-brokerage
-communicant
-central
-aggrieved
-asynchronous
-bough's
-acidly
-archaeology
-complementary
-animator's
-bodyguards
-climbs
-apathy
-constellation's
-acculturate
-archaeologists
-contingents
-control
-anglophilia
-billings
-corporate
-athlete
-accusing
-appear
-announcing
-accordions
-computerize
-combinations
-bile
-abut
-charger
-columnize
-computer
-blacks
-converges
-blamer
-bulked
-convincingly
-checker
-correspondence's
-accelerate
-accessible
-conceivably
-abscissa's
-adsorbs
-anglophobia
-anomic
-casters
-churning
-crease
-brood
-appendage
-bulwark
-bombers
-arcaded
-breadboard's
-aphrodite
-color
-commodore's
-answerer
-bobolink
-cloth
-conversion
-clime
-artery's
-birthplaces
-compiled
-arrack
-beetles
-bobs
-compatibility
-cocoon
-counterpart
-audible
-colonies
-airport's
-beige
-cogent
-bromide
-begrudging
-acids
-crucifies
-beggary
-archipelagoes
-availably
-counterfeiter
-blanketed
-amending
-accelerometer's
-advisors
-byway
-alignment
-amber
-austin
-copyrights
-beaus
-brigantine
-comforts
-appointment's
-crawler
-bangles
-contemplation
-concur
-characterizing
-censoring
-charters
-catalogues
-appropriately
-builds
-aeronautic
-confused
-comber
-axially
-cackler
-coercive
-ambassador
-arcades
-brash
-amorality
-belittling
-battling
-bloodied
-acrylic
-bantered
-clasped
-carcass
-archangel
-annunciators
-aristotle
-boulder
-burglarproofs
-chooser
-abilities
-calmest
-bach
-always
-blaspheming
-crossover
-bakeries
-clocks
-ankle's
-accidental
-arbitration
-chirp
-aeronautical
-boy's
-acidic
-bowline
-anonymously
-cod
-couplers
-beautifications
-bluffing
-backarrows
-brow
-covenant
-acronym's
-banning
-albeit
-ascetic
-burn
-animator
-beatnik's
-coveted
-cipher's
-broke
-cap
-bellman
-bulldozed
-clarifies
-bathes
-blip
-availabilities
-booth
-clangs
-audiences
-cathedrals
-confounding
-bigot's
-beecher
-arts
-company
-attributed
-avenged
-bawling
-caustics
-alee
-bordello's
-banks
-affords
-complied
-commas
-collaborate
-aquatic
-ambitiously
-burro's
-beard
-bittersweet
-candlestick
-bylaws
-broadcastings
-believe
-barrels
-braying
-certifications
-contrasts
-crashes
-audition
-confine
-bucks
-abates
-bureaucracy
-ambles
-besiege
-broccoli
-antibiotics
-attenuators
-accelerometer
-caste
-bib's
-browbeaten
-appurtenance
-bauxite
-asceticism
-case
-chewing
-aerator
-achievements
-barricade's
-agglutinates
-bewildering
-cartridge's
-children
-bufferrer
-actuator
-converging
-bolted
-chat
-combs
-chemist's
-adduced
-algebraic
-circular
-bloated
-conclusion
-burgess
-certifies
-absconds
-comprise
-benzedrine
-bumbler
-banjo
-allow
-appealing
-cooperation
-abraded
-chaperoned
-biracial
-braced
-censurer
-acoustician
-appraised
-benefitting
-constructs
-convertible
-administrative
-asocial
-area
-creature
-besetting
-crater
-begrudgingly
-blanket
-ablest
-alba
-airplanes
-allowing
-briefly
-beneficences
-concurring
-adjective's
-cork
-aerospace
-anomalies
-asher
-auger's
-boilers
-abhorring
-broadenings
-bladder
-belay
-approver
-abdominal
-commends
-cringing
-billiards
-beater
-auspice
-contrasters
-bights
-absentees
-atoll
-cooler
-activator's
-basement
-burgeon
-allusiveness
-codeword's
-bandage
-contemplate
-adopted
-coping
-carving
-baptism
-colds
-altos
-background
-closet
-commuted
-acre's
-aliens
-council
-cans
-cheese
-ally
-aseptic
-belgian's
-crossbar
-addressed
-commons
-call
-careers
-breakfasting
-brazilian
-catholics
-bachelors
-consultant
-brighter
-crossword's
-burglar
-avoidable
-batting
-cigar
-amps
-axiological
-combed
-comforters
-albumin
-cookies
-booming
-archaize
-canton's
-bunkmate
-combination
-bondsman
-anxiously
-affixed
-associatively
-cigar's
-backstitch
-calls
-captivates
-commodities
-atmosphere's
-asserting
-beaver
-beatnik
-container
-activists
-consoler
-commoner
-buttonhole's
-abhorred
-aggregate
-cliff
-antidisestablishmentarianism
-broach
-ambling
-comer
-bited
-advocated
-behaves
-bosom
-continents
-conserves
-bashful
-ago
-backarrow
-circumventable
-avocados
-briar's
-annuls
-barnstorming
-aired
-carry
-crossbar's
-aspire
-beards
-abides
-cliques
-completes
-brassiere
-absorbs
-annul
-chairman
-baron
-battens
-africans
-abatement
-colonization
-carries
-borough
-allurement
-breakfasters
-alkali
-acoustically
-corners
-capturer
-casualties
-asphyxia
-animized
-administrator
-belying
-basketballs
-bylines
-bandit
-autopsies
-braining
-contradiction's
-antic
-butted
-bacillus
-blurt
-conditioned
-backers
-agreeable
-almanacs
-cider
-chicken
-chambers
-clutch
-assailant's
-conveyers
-amazers
-beribboned
-breeder
-caveat's
-buffers
-combination's
-ampersand's
-crafting
-clanged
-caving
-aspirant
-butlers
-adjective
-auckland
-announced
-creators
-caches
-baseline
-codifies
-baptism's
-coarsened
-cohesion
-airman
-avenge
-backaches
-budgeted
-armpit
-bicycled
-converged
-besmirched
-autonomic
-coming
-assemblage's
-chained
-admissions
-alcoholic's
-branches
-bunk
-anciently
-bloods
-adventurers
-amazes
-coloring
-abstractors
-adaptation's
-boar
-amulet
-agglutination
-conquerable
-booker
-confronts
-barometer's
-bedbugs
-barricades
-cheap
-bewitch
-circus
-backward
-archeology
-automobiles
-bending
-amino
-beckoning
-admits
-berliners
-borer
-clambering
-atomizing
-banner
-blissfully
-catchable
-breakdown
-abjured
-computerized
-chaplain's
-amphitheater
-ballot's
-craziness
-croaks
-counties
-adopting
-breast
-airstrip's
-basin
-contemplating
-commitments
-critique
-appears
-bellies
-baccalaureate
-abducted
-blackened
-animosity
-appraising
-antiquity
-assistants
-asthma
-bootstrapping
-bounties
-agleam
-advertisements
-benches
-artful
-broadens
-chuck's
-betrayal
-blasphemed
-brooms
-castled
-coroutine
-conscious
-beetle
-banshee
-advertising
-baring
-awakens
-balm
-billions
-compromisingly
-ballroom's
-burrower
-bayou's
-ambiance
-beheading
-bought
-adagios
-adornment's
-anointed
-abolishment's
-anesthetizes
-badly
-boyishness
-consultant's
-cheek
-cannibals
-breakdowns
-assured
-agates
-bicker
-appliances
-cafe
-bagpipes
-adrenal
-combinatorially
-belligerence
-bricked
-adjacency
-aimless
-crook
-cherry's
-assessing
-brushfire
-cormorant
-captained
-blundered
-conceptually
-congress's
-contraster
-ambushes
-bronze
-autotransformer
-corded
-brisker
-contently
-announcements
-bullet
-apportionments
-columnized
-canon
-conservation
-algaecide
-blackening
-compassion
-beaks
-constructibility
-chapter
-abscond
-costly
-bacon
-coldest
-aptness
-billionth
-altercation
-approbation
-alternator's
-criticizes
-befell
-canopy
-buoyant
-brazil
-anticipate
-absenteeism
-champion
-aesthetics
-cadence
-betroth
-confidants
-bean
-braid
-aphids
-cluttering
-cantankerously
-bloom
-barbarity
-clawing
-bogged
-agreed
-asia
-abrasion
-corporals
-baselines
-box
-chartering
-apotheosis
-ampersands
-conceit
-creamer
-adhered
-circuit
-carpet
-accompaniments
-boomerangs
-blindness
-chipmunks
-bewitched
-allocate
-bicycle
-compacted
-cab
-calcium
-cellists
-apex
-borrows
-completed
-brightly
-constables
-ascertains
-conspiracy's
-badgers
-bunion's
-anabaptists
-broadband
-clefts
-accepted
-benched
-catalogued
-cadenced
-alliteration
-acquiesces
-boxcar's
-athlete's
-bracing
-cremations
-analysis
-crossings
-assorts
-apologize
-brazier
-configurable
-basking
-craves
-belle
-conversation's
-belligerent
-anesthetize
-brewers
-cackles
-adventures
-airlock's
-booklet's
-apply
-anecdotal
-bewails
-computer's
-autographs
-acclimated
-coefficients
-avidly
-beckoned
-broadener
-bulk
-blacklisting
-belly
-acquit
-convoy
-achiever
-aversions
-advisor's
-captor's
-camel's
-asset's
-advantageous
-basement's
-confident
-crescents
-compiling
-butler's
-cartoon's
-adaptive
-chlorine
-abets
-cruelly
-amiable
-baleful
-ceiling's
-adumbrated
-cherry
-aspirant's
-cashing
-candidly
-chaff
-bitter
-brim
-alcove
-bulb's
-carbonizers
-citizen
-attic's
-breed
-consumer
-conferrers
-accommodations
-contrapositive
-beget
-brilliantly
-attentionality
-continuation's
-bosses
-brave
-configurations
-benediction's
-conferring
-accessor's
-bobolinks
-bulled
-cleanness
-algorithm
-advancements
-altogether
-accumulations
-albacore
-bowing
-belching
-apical
-consequentiality
-bagpipe's
-ambrosial
-bullying
-cleans
-attendance's
-complimenter
-blink
-cager
-assembling
-coat
-allowable
-astringent
-antiresonator
-cardinal
-clicks
-commentator's
-blossom
-categorizing
-amphibian's
-commonality
-consonant
-classics
-affable
-accorded
-aimlessly
-archetype
-administerings
-boldness
-anatomy
-apprehensively
-absence's
-actuality
-attempting
-categorical
-checkpoints
-allemande
-corer
-behoove
-bleaches
-bough
-blended
-blotting
-baptists
-courtship
-benevolent
-bumptiousness
-chum
-anguished
-auto
-career
-bookstore's
-carbonized
-autocratically
-cherishes
-attendees
-contends
-anastomotic
-attributing
-abbot
-came
-blunt
-battlement's
-affection
-coordination
-annotate
-besets
-bucked
-boasting
-benedictions
-adherent
-blimp's
-acknowledging
-cleverly
-applejack
-annexation
-bat's
-cantons
-beetled
-closed
-country
-creatively
-bakery
-blasphemously
-chalking
-bold
-attended
-crasher
-backtrackers
-artist's
-bracelet's
-allowably
-affiliating
-arrant
-brayed
-barbells
-consigned
-abolishers
-climatic
-atrophying
-amigo
-arsenal
-ascribes
-converses
-aura's
-allotted
-bliss
-classical
-bigger
-ahead
-chopped
-blade
-casualty
-acceded
-bottling
-axon
-casement's
-battlefront's
-convinces
-alerting
-advertisers
-blemish
-agglutinating
-commonplaces
-autocorrelation
-armistice
-crediting
-besmirch
-amplify
-auscultation
-befalls
-called
-alnico
-arbiter's
-abort
-argonauts
-cessations
-cribs
-blare
-aforementioned
-condemners
-contaminated
-complained
-bootstrapped
-criticism
-cooperatively
-binding
-bullies
-basins
-contrived
-assort
-adulterously
-booms
-abandons
-also
-appealed
-count
-contributed
-beet
-crashers
-carryovers
-clays
-blackness
-cosmetics
-awkward
-blurted
-bothers
-analyzer
-backups
-alarming
-bicyclers
-credit
-abrogate
-audience's
-architecturally
-alibi's
-complicator's
-chuckle
-corporately
-banishment
-communist's
-birdie
-asymptotic
-break
-braze
-benzene
-bridgework's
-beak
-agitators
-collateral
-arranges
-bayonet
-breathlessly
-counsellor
-creates
-convulsions
-backdrops
-applicants
-altercation's
-commission
-breathtakingly
-corresponds
-backdrop
-armaments
-build
-biannual
-buttoning
-computational
-chaired
-bather
-critically
-amanuensis
-bantus
-confidential
-annoyance's
-carder
-authorizing
-acquits
-bipeds
-cocktail
-cinnamon
-burros
-brocade
-abdomen's
-creative
-acquisition's
-abdomen
-baited
-aristocratically
-alive
-committed
-arrestor
-cleaving
-comedy's
-baggage
-bra
-adaptors
-afoot
-bulls
-contoured
-amalgam
-comprehensibility
-amortizes
-biographical
-confront
-covert
-cravat
-animates
-booksellers
-bypass
-bootleggers
-bedfast
-affair's
-buzzer
-bellowed
-aligning
-bystander
-acclimatized
-accomplishing
-against
-blankness
-adopt
-addressing
-croaked
-boaters
-behooves
-audits
-boatyard
-cruise
-agnostics
-ailing
-anchorage's
-adaptations
-conceptualize
-advised
-cries
-bank
-actuators
-brazing
-catalyst
-beachheads
-aplomb
-compressed
-amputated
-contractor's
-bedspreads
-bowed
-coon
-chaplain
-cannons
-coffers
-assembly
-bouffant
-converters
-ampoule's
-borderland
-archaeologist
-blankets
-conserving
-avalanche
-assortment's
-aspic
-axle
-bereaves
-allowance
-carbonization
-bartender
-clawed
-coincidental
-appeared
-chipmunk's
-countable
-authenticators
-bestow
-alps
-caw
-aniseikonic
-avows
-blackmails
-controlling
-correlating
-audiologist's
-bit
-approving
-collapse
-coon's
-cleave
-atheists
-brigade
-autopilots
-bounteous
-commercialness
-accede
-cavalierness
-accustoming
-burnishing
-clobber
-aspirates
-brochures
-cellar's
-communes
-berkelium
-chickadee
-cobweb
-circumstances
-chose
-comprehend
-baritone's
-aggravation
-adopts
-cruelty
-and
-axer
-cautioned
-carbonic
-babbles
-bet
-charitable
-computable
-cardinality
-amenities
-confiscating
-catcher
-audaciousness
-complaint's
-cooperator's
-buddies
-baking
-constant
-classmate's
-accentuate
-choices
-crop's
-authorization's
-comedy
-brushy
-brotherly
-canals
-ads
-causeway
-abrading
-cemetery
-autocrat
-briefing
-abdomens
-apparition's
-consummately
-alkaloids
-bulkheads
-cravats
-bales
-campaigners
-bagpipe
-accentuates
-arm
-barometric
-bas
-agitator
-behavior
-abutters
-blockades
-alertness
-civilizes
-chinner
-anthropologist
-artificialness
-balkanize
-automates
-cackling
-anarchists
-amounted
-cereal's
-anodized
-cobblers
-acknowledgment's
-blear
-copper
-alphabetics
-blackboards
-apish
-answering
-afternoon
-arbors
-accused
-chickens
-agency's
-contractors
-contraptions
-cosmology
-anomaly
-bandstand
-attempter
-account
-challengers
-admiration
-calculations
-autocracy
-analyticity
-accord
-buildup's
-commonly
-babbling
-adjudication's
-attain
-ameliorating
-candlestick's
-chronicles
-align
-consensus
-agate
-adulation
-aspirated
-conclusive
-biologists
-cracks
-conform
-chambered
-beryllium
-connote
-amusing
-aquifer
-ankle
-batteries
-conservationists
-accountants
-apiaries
-actinometer
-beckon
-clearances
-clouded
-antitoxin's
-consolation's
-collectives
-boxtops
-bombarded
-bombarding
-bluest
-allusion's
-construction
-ballpark's
-codified
-coincidence
-celebration
-chip
-beginner's
-algerian
-boo
-athletics
-condenser
-bytes
-beauties
-concerts
-conductors
-awl's
-agitations
-buttered
-codifier's
-armory
-ascii
-aspirin
-arthritis
-bylaw's
-conformity
-blasting
-coinciding
-aphid's
-ceremonial
-banisters
-bristle
-bid's
-buckboard's
-bandied
-biopsy
-ballrooms
-chloroplasts
-bidding
-boil
-algebra
-constellation
-chuck
-cringes
-cleanliness
-apron's
-cosmopolitan
-bashes
-abusive
-believer
-conductor
-butters
-breweries
-allotment
-artfulness
-bunkmates
-blares
-connections
-anticipated
-classifies
-commandments
-beginnings
-bend
-brambles
-blacked
-basketball's
-affectionate
-cocoa
-anacondas
-busing
-bone
-birchen
-creamed
-aged
-commemorates
-brother
-aberration
-crawl
-actuarial
-apology's
-alumnus
-adversary's
-anaphoric
-aspiring
-consciousness
-cokes
-assignee
-boxing
-blanched
-camels
-contemporaries
-carnivorous
-assigned
-apologetically
-corpus
-accusations
-beefing
-champaign
-claps
-adherence
-aloft
-complication
-citizenship
-becomes
-compound
-arabesque
-bronchiole's
-appraises
-breach
-collection's
-botched
-bitches
-biblically
-bronchial
-amalgamating
-commoner's
-barbarian's
-arrange
-cradle
-conformed
-complimentary
-anodes
-cowering
-anoint
-brocaded
-bedazzling
-avionics
-burnishes
-bulkhead
-chink
-consciously
-contract
-clinch
-applicant's
-awning
-aloud
-chandelier's
-cathode's
-babble
-arachnid
-biplane
-clamorous
-assuredly
-consented
-axing
-avenger
-commence
-braving
-brandishing
-careless
-burningly
-boatsman
-channelling
-clarifying
-beggar
-berates
-cite
-cowered
-buffer
-condescending
-admixes
-bettering
-bedazzlement
-cord
-burglary's
-characteristics
-aptitudes
-adieu
-agree
-bends
-ceremonies
-accustom
-accessibly
-commanders
-ask
-cavalier
-brayer
-affront
-courser
-becoming
-carves
-configures
-beasts
-biters
-conditionals
-bodybuilding
-accretions
-chapter's
-cleverer
-corning
-brat
-classes
-almsman
-consumptive
-antique
-comprised
-beholders
-anthropologically
-buns
-bridge
-accretion
-acceptance's
-confederacy
-armorer
-argumentative
-crossword
-cowslip's
-analog
-counselor
-chastised
-barters
-clerked
-americas
-cloud
-aide
-alternators
-admitters
-bagatelle
-bridges
-civilizations
-anion's
-briton's
-apartment
-acquaints
-consummation
-chord
-coated
-barer
-carnivorously
-cheering
-allergy
-capacity
-classrooms
-assistantships
-complimented
-amphibiously
-commandment's
-audiogram
-corked
-badness
-bewildered
-assemblage
-backplane's
-asterisk's
-blob
-coexisting
-approximations
-counteractive
-barns
-adherer
-aborigine's
-brooding
-conceived
-adjustor
-cabled
-belongings
-breadwinner
-blot's
-brightness
-consigning
-barflies
-bisector's
-basing
-complement
-conditioner
-brazes
-crank
-antinomian
-crowd
-accelerometers
-befitting
-backlash
-bastions
-acceleration
-briefcases
-correlated
-baffle
-chew
-accosts
-agreeably
-bassinets
-cogitate
-concerning
-contouring
-broadside
-compact
-brainstems
-atom's
-bondage
-biter
-archdioceses
-basis
-bellboy
-blobs
-barons
-clods
-campaigned
-assessors
-bubbles
-annal
-casual
-altercations
-clog's
-biased
-arianism
-ancillary
-collaborator
-butter
-bureau
-blending
-antiquities
-brands
-activism
-crews
-beats
-broad
-buds
-baggers
-cobbler's
-condemns
-cabinets
-bomber
-blinders
-center
-contacted
-bewilderingly
-circulates
-burnings
-achieved
-belch
-barbecue
-angles
-comparative
-befuddle
-cherished
-chapters
-chanter
-allegation's
-armstrong
-converter
-combinatoric
-angrier
-brooks
-clinked
-blubber
-appointments
-compactor
-cleaned
-car
-contention's
-artificial
-cramp
-consistency
-aborting
-collaboration
-awarders
-crippled
-anaphora
-creamy
-buoyed
-baptistery
-altered
-anchoring
-alterer
-adjuring
-beacon's
-commencement's
-ascension
-candidness
-clouding
-cigars
-boiled
-christmas
-contingency's
-alum
-apparel
-contributors
-anisotropic
-annotations
-bushwhacks
-brides
-continuities
-carton's
-blurred
-antibody
-aorta
-blankest
-combinator's
-banish
-breaches
-accumulates
-bowling
-braver
-antibacterial
-cooperators
-banked
-compensated
-chartable
-conjunctively
-antelope's
-bluefish
-annoying
-composed
-barges
-biconcave
-australia
-ballparks
-bearers
-acknowledged
-advocates
-crossed
-competitor
-blaming
-andorra
-baritone
-collaborator's
-accessibility
-complains
-commentator
-bibliography
-conference's
-atmosphere
-agrees
-bedstead's
-ardor
-character's
-conventionally
-arena's
-chokes
-channel
-bludgeon
-convoys
-condense
-beautifier
-ailerons
-compacts
-black
-bell
-completions
-ballroom
-besotting
-conservatives
-adventured
-bulldog's
-conversely
-arroyos
-compositional
-alternative
-association
-broods
-beefy
-consolidated
-balms
-acquaint
-animal
-certificate
-combustion
-aims
-cracker
-abetted
-cautionings
-bread
-attains
-agriculturally
-courtyards
-bawls
-country's
-creator's
-checkbook's
-cliches
-colonizing
-biennial
-aqueous
-craftsman
-contrivances
-algorithmic
-crate
-barefooted
-bodily
-anthropologist's
-but
-climate's
-campers
-crackled
-awakes
-conveyed
-borrowers
-approached
-avoids
-crib
-albania
-bathrobe
-admonitions
-architectures
-consenting
-anastomosis
-blob's
-actual
-arrowhead's
-accountable
-allegiances
-commendation
-appearers
-comply
-concurs
-controversy
-abstracting
-artifact
diff --git a/storage/bdb/test/wrap.tcl b/storage/bdb/test/wrap.tcl
deleted file mode 100644
index 34ec451072c..00000000000
--- a/storage/bdb/test/wrap.tcl
+++ /dev/null
@@ -1,71 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 2000-2004
-# Sleepycat Software. All rights reserved.
-#
-# $Id: wrap.tcl,v 11.8 2004/01/28 03:36:33 bostic Exp $
-#
-# Sentinel file wrapper for multi-process tests. This is designed to avoid a
-# set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs
-# to sit around waiting for some random process that's not DB's and is not
-# exiting.
-
-source ./include.tcl
-source $test_path/testutils.tcl
-
-# Arguments:
-if { $argc < 3 } {
- puts "FAIL: wrap.tcl: Usage: wrap.tcl script log scriptargs"
- exit
-}
-
-set script [lindex $argv 0]
-set logfile [lindex $argv 1]
-set args [lrange $argv 2 end]
-
-# Create a sentinel file to mark our creation and signal that watch_procs
-# should look for us.
-set parentpid [pid]
-set parentsentinel $testdir/begin.$parentpid
-set f [open $parentsentinel w]
-close $f
-
-# Create a Tcl subprocess that will actually run the test.
-set t [open "|$tclsh_path >& $logfile" w]
-
-# Create a sentinel for the subprocess.
-set childpid [pid $t]
-puts "Script watcher process $parentpid launching $script process $childpid."
-set childsentinel $testdir/begin.$childpid
-set f [open $childsentinel w]
-close $f
-
-puts $t "source $test_path/test.tcl"
-puts $t "set script $script"
-
-# Set up argv for the subprocess, since the args aren't passed in as true
-# arguments thanks to the pipe structure.
-puts $t "set argc [llength $args]"
-puts $t "set argv [list $args]"
-
-puts $t {set ret [catch { source $test_path/$script } result]}
-puts $t {if { [string length $result] > 0 } { puts $result }}
-puts $t {error_check_good "$test_path/$script run: pid [pid]" $ret 0}
-
-# Close the pipe. This will flush the above commands and actually run the
-# test, and will also return an error a la exec if anything bad happens
-# to the subprocess. The magic here is that closing a pipe blocks
-# and waits for the exit of processes in the pipeline, at least according
-# to Ousterhout (p. 115).
-
-set ret [catch {close $t} res]
-
-# Write ending sentinel files--we're done.
-set f [open $testdir/end.$childpid w]
-close $f
-set f [open $testdir/end.$parentpid w]
-close $f
-
-error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\
- $ret 0
-exit $ret
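
For context on the removal above: the sentinel-and-pipe pattern that wrap.tcl implemented can be sketched in isolation as follows. This is a minimal sketch, not the BDB harness itself; the sentinel directory name, the log file name, and the child script are assumptions, and testutils.tcl's error_check_good is replaced here by a plain catch.

# Minimal sketch (assumed names, not part of the BDB test suite) of the
# pattern wrap.tcl used: write a "begin" sentinel keyed by pid, run the
# child through a pipe so [close] blocks until it exits, then write an
# "end" sentinel. A watcher can pair begin.*/end.* files instead of
# polling pids, which is unsafe where pid numbers are reused quickly.

set dir sentinels
file mkdir $dir

# Begin sentinel for this (parent) process.
set parent [pid]
close [open $dir/begin.$parent w]

# Run a child Tcl interpreter through a pipe; its pid is recoverable
# with [pid $channel], and closing the channel waits for it to exit.
set chan [open "|tclsh >& child.log" w]
set child [pid $chan]
close [open $dir/begin.$child w]

# Feed the child its script over the pipe, as wrap.tcl did.
puts $chan {puts "child running"; exit 0}

# [close] flushes the pipe, waits for the child, and raises an error
# if the child exits nonzero; catch turns that into a status flag.
set failed [catch {close $chan} msg]

# End sentinels mark orderly completion for both processes.
close [open $dir/end.$child w]
close [open $dir/end.$parent w]
exit $failed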